diff --git a/.gitattributes b/.gitattributes index bfd85478d2b8d368bd3cf29c8ddc3ae5b6041d67..806514b0eb7b9a8077b3f33f1569389fcc16d42d 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1268,3 +1268,11 @@ data/2025/2504_07xxx/2504.07199/82f4c002-5d7c-4071-8bfe-356ecda59aa3_origin.pdf data/2025/2504_07xxx/2504.07419/453a4811-54b1-45ea-aec3-1df3f90fc05f_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_07xxx/2504.07491/dd063eed-9f04-4854-99e3-2a9fe4445e8f_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_07xxx/2504.07615/c647032a-0691-45f0-914a-184f71e81f9c_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_content_list.json b/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..d161919473ad106ed4a513045bfe1bf6d41a5c16 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_content_list.json @@ -0,0 +1,3202 @@ +[ + 
{ + "type": "text", + "text": "Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill?", + "text_level": 1, + "bbox": [ + 171, + 98, + 712, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chenrui Fan $^{1*}$ , Ming Li $^{1*}$ , Lichao Sun $^{2}$ , Tianyi Zhou $^{1}$", + "bbox": [ + 179, + 164, + 563, + 180 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1University of Maryland; 2Lehigh University", + "bbox": [ + 184, + 181, + 480, + 196 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{cfan42, minglii, tianyi}@umd.edu", + "bbox": [ + 184, + 196, + 504, + 210 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Project: https://github.com/tianyi-lab/MiP-Overthinking", + "bbox": [ + 183, + 210, + 692, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "\"The Answer to the Great Question... Of Life, the Universe and Everything... is... Forty-two,\" said Deep Thought, with infinite majesty and calm.", + "bbox": [ + 228, + 268, + 767, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The Hitchhiker's Guide to the Galaxy", + "bbox": [ + 491, + 311, + 761, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 338, + 537, + 354 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We find that the response length of reasoning LLMs, whether trained by reinforcement learning or supervised learning, drastically increases for ill-posed questions with missing premises (MiP), ending up with redundant and ineffective thinking. This newly introduced scenario exacerbates the general overthinking issue to a large extent, which we name as the MiP-Overthinking. Such failures are against the \"test-time scaling law\" but have been widely observed on multiple datasets we curated with MiP, indicating the harm of cheap overthinking and a lack of critical thinking. 
Surprisingly, LLMs not specifically trained for reasoning exhibit much better performance on the MiP scenario, producing much shorter responses that quickly identify ill-posed queries. This implies a critical flaw of the current training recipe for reasoning LLMs, which does not encourage efficient thinking adequately, leading to the abuse of thinking patterns. To further investigate the reasons behind such failures, we conduct fine-grained analyses of the reasoning length, overthinking patterns, and location of critical thinking on different types of LLMs. Moreover, our extended ablation study reveals that the overthinking is contagious through the distillation of reasoning models' responses. These results improve the understanding of overthinking and shed novel insights into mitigating the problem.", + "bbox": [ + 228, + 371, + 769, + 609 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 636, + 313, + 651 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reasoning abilities in large language models (LLMs) have become a cornerstone of advanced AI applications (Huang & Chang, 2023; Li et al., 2024; Ahn et al., 2024; Wang et al., 2025), powering breakthroughs in mathematical reasoning (Xiong et al., 2025; Xia et al., 2025), code generation (Liu et al., 2024), and commonsense question answering (Wang & Zhao, 2023). These gains often stem from the scaling law of model/dataset sizes (Kaplan et al., 2020) in both pre-training (Shao et al., 2024) and post-training, which unlock emergent capabilities such as step-by-step reasoning and reflection skills witnessed on OpenAI's GPT-o1 (OpenAI, 2024b) and the open-source DeepSeek-R1 (DeepSeek-AI et al., 2025). 
By leveraging supervised fine-tuning (SFT) on expert responses (Ye et al., 2025; Muennighoff et al., 2025) and/or reinforcement learning (RL) (DeepSeek-AI et al., 2025), these models are tailored to produce detailed multi-step reasoning paths, whose length increase usually associated with improved performance on complex tasks such as math reasoning and programming.", + "bbox": [ + 169, + 667, + 826, + 824 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite the fascinating reasoning capabilities exhibited on recent models, there is growing concern about the efficiency and quality of the long reasoning process (Sui et al., 2025). Chen et al. (2025b) first raises the \"overthinking\" problem in reasoning LLMs, which is reflected by the excessively long reasoning paths generated for extremely simple queries. For example, even for questions like \"What is the answer of 2 plus 3?\", existing reasoning models might generate hundreds of response tokens.", + "bbox": [ + 169, + 828, + 826, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 330, + 47 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06514v2 [cs.AI] 11 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal Contribution.", + "bbox": [ + 197, + 910, + 323, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg", + "image_caption": [ + "Figure 1: Illustration of MiP-Overthinking. When queried by questions with missing premises, the response length of reasoning models increases excessively, and they cannot abstain from answering with MiP identified. 
The left shows a query with an undefined variable, while the right compares a well-defined GSM8K question with its MiP variant (with a critical numerical condition removed). Reasoning models' responses to MiP questions are much longer than those for well-defined questions and those generated by non-reasoning models. The left corner of each response report the response length and thinking time by DeepSeek-R1." + ], + "image_footnote": [], + "bbox": [ + 181, + 106, + 480, + 318 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 483, + 106, + 816, + 319 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In particular, the ill-posed queries are unsolvable due to the lack of a necessary premise or condition. We call the reasoning failure for the ill-posed queries Overthinking under Missing Premise (MiP-Overthinking). For example, the simplest MiP question is What is the value of $a$ ?1, as shown on the left part of Figure 1. Without providing any other information regarding $a$ , it is evidently unsolvable. However, DeepSeek-R1 generates thousands of tokens and spends several minutes thinking about this question before outputting the final meaningless answer. In this paper, we find that a trivial type of ill-posed queries will significantly exacerbate the overthinking of reasoning models, resulting in excessively redundant and meaningless thinking. In contrast, humans and even non-reasoning models are often immune to such scenarios and quickly end up by questioning the validity of the given query, indicating the critical thinking capability. This exposes a risk of the abuse of thinking patterns and a lack of critical thinking on the models trained for deep thinking. 
Ideally, a model with critical thinking skills is expected to identify the missing premise and quickly respond by either requesting clarification or gracefully indicating that it cannot proceed (Cole et al., 2023; Amayuelas et al., 2024).", + "bbox": [ + 169, + 426, + 826, + 608 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "MiP-Overthinking differs from the widely discussed overthinking issue (Cuadron et al., 2025), in which the query is usually well-defined, but a model applies much more reasoning than necessary for little benefit. MiP-Overthinking, by contrast, happens when the question itself is ill-posed and lacks sufficient information to be solved. For example, the right of Figure 1 presents a well-defined question from GSM8K and a MiP variant, where the latter triggers a drastic increase of the generated tokens on recent reasoning models compared with the general overthinking. Overthinking can be presented by the length difference between models addressing the same well-defined questions, while MiP-Overthinking can be presented by the additional tokens generated due to MiP. MiP-Overthinking further reveals the lack of critical thinking that questions the validity of ill-posed questions and quickly identifies MiP, thus abstaining from answering the questions. Moreover, we observe that reasoning models' ineffective and redundant thinking often cannot stop even after successful notice of MiP, violating the expectation of test-time scaling law. Hence, MiP-Overthinking indicates potential drawbacks of current training recipes of reasoning models.", + "bbox": [ + 169, + 614, + 823, + 795 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To systematically investigate this issue, we construct a suite of MiP questions designed to trigger the overthinking failures in a controlled way. 
These include synthetic questions generated by Rule-based Formula (queries where a formula reference is empty or nonsensical) and careful modifications of established datasets across diverse levels of difficulties, including SVAMP, GSM8K, and MATH500. On the modified datasets of MiP questions, we empirically evaluate a wide range of state-of-the-art", + "bbox": [ + 169, + 801, + 823, + 872 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "1In The Hitchhiker's Guide to the Galaxy, the supercomputer Deep Thought spends hundreds of years to answer the the Ultimate Question of Life, the Universe, and Everything as 42, and we observe that DeepSeek-R1 spends thousands of tokens to answer What is the value of $a$ as 2, which we find them interestingly alike.", + "bbox": [ + 169, + 883, + 823, + 924 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "LLMs, from reasoning models to non-reasoning models and from open-sourced models to proprietary models, to ensure the generalizability of our findings. Our analysis is mainly based on three evaluation metrics, the length of generated responses, the accuracy on well-defined questions, and the abstain rate on ill-posed questions with MiP.", + "bbox": [ + 169, + 103, + 823, + 160 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Main Contributions: We present the first in-depth study of Overthinking under Missing Premise (MiP-Overthinking), which reveals a critical shortcoming in existing reasoning models: Although they appear to follow coherent reasoning patterns, they lack genuine critical thinking capabilities. 
To systematically analyze this issue, we curate four MiP datasets covering various difficulty levels and three ill-posed question generation strategies, i.e., Rule-Based Generation, Body-Question Swapping, and Essential-Premise Removal. We then evaluate a wide range of large language models including reasoning-based and non-reasoning ones. Our empirical results illuminate the differences in how models handle well-defined vs. MiP questions, ultimately offering insights into the limitations of existing reasoning models.", + "bbox": [ + 169, + 166, + 826, + 292 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our key findings:", + "text_level": 1, + "bbox": [ + 171, + 297, + 299, + 313 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Missing premise in questions induces reasoning models to generate significantly longer ( $2 \\times$ to $4 \\times$ more tokens) responses than general overthinking on well-defined questions. The increased tokens fail to help identify MiP in the ill-posed questions, surprisingly contradicting the widely-discussed test-time scaling law.", + "2. In contrast, given MiP questions, non-reasoning models generate consistently shorter responses and quickly identify MiP, demonstrating greater robustness to the absence of critical information.", + "3. Reasoning models respond differently to well-defined vs. MiP questions: they mostly follow stable chain-of-thoughts for the former, but are often trapped in a self-doubt loop, repeatedly revisiting the question, and guessing the user intentions under MiP, resulting in an explosion of tokens.", + "4. Reasoning models often can notice the existence of MiP or identify it at an early stage, but they hesitate to commit to this judgment and keep outputting ineffective thinking." 
+ ], + "bbox": [ + 169, + 325, + 826, + 507 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Missing Premise Definition and Construction", + "text_level": 1, + "bbox": [ + 169, + 530, + 581, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Definition of Missing Premise", + "text_level": 1, + "bbox": [ + 171, + 564, + 419, + 579 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Prior to introducing the construction our dataset and analyzing the behavior of reasoning models on problems with missing premises, we formally define the Missing Premise (MiP) problem to establish a rigorous foundation for our subsequent analysis.", + "bbox": [ + 169, + 592, + 823, + 636 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Definition 1 (Missing Premise Problem). Let $\\mathcal{Q}$ be a question, and let $P = \\{P_{1},\\dots ,P_{n}\\}$ be a set of premises. Define the function mapping premises and a question to the set of logically valid answers as:", + "bbox": [ + 196, + 656, + 800, + 699 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {F} (P, \\mathcal {Q}) = \\{A \\mid P \\vdash A, A \\text {i s a n a n s w e r r e s o l v i n g} \\mathcal {Q} \\} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 315, + 707, + 799, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\vdash$ denotes logical entailment. Consider a proper subset $P' = P \\setminus \\{P_i\\}$ for some $P_i \\in P$ . 
The tuple $(P', \\mathcal{Q})$ forms a missing premise problem if and only if:", + "bbox": [ + 196, + 729, + 799, + 760 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\left| \\mathcal {F} (P, \\mathcal {Q}) \\right| = 1 \\quad a n d \\quad \\left| \\mathcal {F} \\left(P ^ {\\prime}, \\mathcal {Q}\\right) \\right| \\neq 1 \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 364, + 766, + 799, + 785 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This indicates that the removed premise $P_{i}$ is essential for uniquely determining the logically valid answer to the question $\\mathcal{Q}$ .", + "bbox": [ + 194, + 790, + 799, + 820 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "According to Definition 1, an ideal reasoning system should efficiently identify the absence of a critical premise and terminate its inference process upon recognizing that the available information is insufficient to derive a unique solution to the given problem. However, our empirical analysis in Section 3.2 demonstrates that state-of-the-art reasoning models consistently fail to exhibit this capability. Instead, these models engage in extensive, redundant reasoning chains that consume significant computational resources without ultimately identifying the missing premise.", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 330, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/796d4f8cbc0422b1cedf1c7ed35fab8608e8e67607cb9103181f709b8ed74a62.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DatasetExampleDiffCountPairMethod
MiP-FormulaWhat is the value of ln(a+b)?50×Rule-Based Generation
MiP-SVAMPPaco had 26 salty cookies and 17 sweet cookies. He ate 14 sweet cookies and 9 salty cookies. How many salty cookies did Paeo have left? How many pencils does she have?300×Body-Question Swapping
MiP-GSM8KJames decides to run 3 sprints 3 times a week. He runs 60-meters each sprint. How many total meters does he run a week?★★582Essential-Premise Removal
MiP-MATHThere are 360 people in my school. 15 take calculus, physics, and chemistry, and 15 don’t take any of them. 180 take calculus. Twice as many students take chemistry as take physics. 75 take both calculus and chemistry, and 75 take both physics and chemistry. Only 30 take both physics and calculus. How many students take physics?★★★58Essential-Premise Removal
", + "bbox": [ + 173, + 99, + 823, + 328 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Table 1: Statistics and examples of our curated MiP datasets. For GSM8K and MATH, a premise is removed from the original questions (crossed out) to create MiP questions. Diff represents the (estimated) difficulty for models to identify MiP. Count denotes the number of questions in the subset. Pair indicates whether each MiP question is associated with a well-defined original question. Method indicates the method used to generate the MiP question.", + "bbox": [ + 169, + 332, + 826, + 402 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Overview of Data Construction", + "text_level": 1, + "bbox": [ + 171, + 422, + 431, + 436 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different difficulties. In addition, we also construct a synthetic dataset consisting of formulas with unassigned variables. Our ill-posed question generation employs three distinct methods covering three difficulty levels and three strategies to create MiP questions:", + "bbox": [ + 169, + 449, + 826, + 518 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- **Rule-Based Generation:** This approach generates MiP questions through a principled formula construction process, where unassigned variables serve as the missing premises.", + "- Body-Question Swapping: We introduce logical inconsistencies by deliberately mismatching problem bodies with their corresponding questions from the original dataset. This creates scenarios where the premises and queries are fundamentally incompatible.", + "- Essential-Premise Removal: Through careful analysis of existing well-formed questions, we identify and remove critical premises that are necessary for logical resolution. 
This transformation preserves the question's structure while rendering it unsolvable." + ], + "bbox": [ + 189, + 530, + 826, + 654 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The following sections provide a detailed overview of our data construction process for each dataset category. For comprehensive implementation details and additional methodological considerations, we refer readers to Appendix B.", + "bbox": [ + 169, + 665, + 826, + 708 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through combinations of variables and operators, with a maximum recursion depth of three. While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables.", + "bbox": [ + 169, + 714, + 826, + 771 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset with elementary-school-level math problems, where each instance consists of a problem body and an associated question. We generate MiP question by randomly permuting the problem bodies and associated questions and then manually inspect them to avoid inadvertent cases. The resulting problems contain clear logical inconsistencies between their body and question components, which is easy for a human to identify.", + "bbox": [ + 169, + 777, + 826, + 848 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a more complex mathematics dataset than SVAMP. The questions in GSM8K typically contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. 
We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question before conducting human verification to filter out those questions that are still solvable in some way.", + "bbox": [ + 169, + 854, + 826, + 925 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 491, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg", + "image_caption": [ + "Figure 2: Response lengths, accuracy on well-defined questions, and abstain rate of reasoning/non-reasoning models on MiP questions from our MiP-GSM8K dataset. (1) Existing reasoning models generate significantly longer responses for MiP questions than well-defined questions, while non-reasoning models generate responses of similar lengths for both types of questions, indicating MiP-Overthinking for reasoning models. (2) For both questions, reasoning models generate longer responses than non-reasoning models, indicating General Overthinking. (3) Although the longer responses by reasoning models slightly improve the accuracy for well-defined questions, it does not enhance the abstain rate for MiP questions, indicating a contradiction on the test-time scaling law." + ], + "image_footnote": [], + "bbox": [ + 174, + 103, + 823, + 325 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable.", + "bbox": [ + 169, + 452, + 823, + 479 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "MiP-MATH. 
For MATH 500 dataset (Hendrycks et al., 2021), which contains challenging mathematical questions at the competition level, it is difficult to build a rule-based filtering mechanism. Thus, we manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts.", + "bbox": [ + 169, + 487, + 826, + 571 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Overthinking under Missing Premise", + "text_level": 1, + "bbox": [ + 171, + 590, + 516, + 608 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 Evaluation Metrics", + "text_level": 1, + "bbox": [ + 171, + 622, + 346, + 635 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To systematically evaluate model responses under MiP, we conduct experiments with a diverse set of reasoning and non-reasoning models. For each model, we analyze calculate the following metrics for the responses across different datasets:", + "bbox": [ + 169, + 648, + 826, + 689 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Response Length: The average number of tokens in the response, incorporating both reasoning steps and final answer components.", + "- Abstain Rate for MiP Question: The proportion of answers where the model explicitly identifies the missing premise and either declines to provide an answer or requests additional information necessary for solving the problem.", + "- Accuracy for Well-defined Question: The proportion of answers where the model produces a definitive response that aligns with the reference answer." 
+ ], + "bbox": [ + 174, + 690, + 823, + 795 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For datasets without reference answers (MiP-Formula and MiP-SVAMP), we only calculate the abstain rate for the questions. Response evaluation is performed using GPT-4o as an automated evaluator. Detailed experimental procedures and evaluation protocols are provided in Appendix A.", + "bbox": [ + 169, + 797, + 823, + 839 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Main Results", + "text_level": 1, + "bbox": [ + 171, + 854, + 305, + 869 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Figure 2 compares average response length, accuracy on well-defined questions, and the abstain rate on MiP questions across a range of state-of-the-art LLMs, revealing several significant patterns in model behavior.", + "bbox": [ + 169, + 882, + 823, + 922 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/4c8dc6f4d9833d3fce23421c24b1b26bab79aa993e32376b4415fd95a18165cd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelTypeMiP-FormulaMiP-SWAMPTypeMiP-GSM8KMiP-MATH
Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑
Non-Reasoning Models
Qwen2.5-32B-InstructMiP28544.012898.3MiP21944.052515.4
Well-defined2460.511141.9
GPT-4oMiP33870.012296.3MiP20246.948715.4
Well-defined2120.54721.9
Gemini 1.5MiP45320.05299.0MiP10354.55685.8
Well-defined1560.55020.0
Gemma-2-27B-ITMiP20485.78992.0MiP13153.833838.5
Well-defined1480.330511.5
Phi-3-medium-128kMiP146548.012598.7MiP21047.842723.1
Well-defined2161.015493.8
Reasoning Models
GPT-o1MiP112378.058199.0MiP83855.7418930.8
Well-defined3480.325020.0
GPT-o1miniMiP95866.063996.7MiP76240.0219325.0
Well-defined4491.219130.0
GPT-o3miniMiP102576.0129993.0MiP151623.7377211.5
Well-defined3841.415530.0
DS Distill Qwen2.5-32BMiP1291142.092188.3MiP230224.698765.8
Well-defined5190.232460.0
DeepSeek R1MiP47576.0199684.3MiP324616.572683.8
Well-defined12260.232001.9
S1.1-32BMiP528418.0335857.0MiP502416.0932215.4
Well-defined18960.250370.0
QwQ-32BMiP79370.0348756.3MiP478010.1102421.9
Well-defined18960.250370.0
", + "bbox": [ + 173, + 99, + 823, + 397 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 2: Comparing response length and abstain rate across different MiP datasets. Shorter lengths and higher abstain rates are preferred. For each column, the top-3 preferred values are colored in green, otherwise red. MiP-Overthinking, reflected by longer response with low abstain rate, is commonly observed on most existing reasoning models across all datasets, indicating a critical drawback of existing reasoning models.", + "bbox": [ + 169, + 407, + 823, + 478 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Firstly, existing reasoning models (left side of the figure) display an explosive increase in response length when facing the MiP questions, often producing $2 - 4 \\times$ more tokens than general overthinking on well-defined questions. For example, QwQ-32B (Team, 2025) and DeepSeek-R1 (DeepSeek-AI et al., 2025) exhibit a substantial increase from already long reasoning paths on well-defined questions (approximately 1,000 tokens for simple GSM8K questions) to highly lengthy outputs (more than 3,000 tokens) under missing premise conditions. On the contrary, no similar issues exist for non-reasoning models (right side of the figure), which generate similar token counts for both types of well-defined and MiP questions. This phenomenon directly illustrates the NiP-Overthinking phenomenon as introduced in the paper.", + "bbox": [ + 169, + 513, + 823, + 638 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Secondly, comparing the token lengths on well-defined questions between the reasoning and non-reasoning models, reasoning models tend to produce longer responses, even for simple questions, than non-reasoning models, underscoring the inefficient and verbose responses of existing reasoning models. 
For example, for the non-reasoning models, it only takes approximately 200 tokens for them to generate the responses for well-defined questions, while taking 1,000 tokens for DeepSeek-R1 and 1,800 tokens for QWQ-32B to answer the exactly same questions. However, the explosive increase in extra tokens does not lead to corresponding large accuracy improvements, shown in the green line, highlighting the issue of the General Overthinking.", + "bbox": [ + 169, + 645, + 826, + 757 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Finally, the abstain rates (red line) on MiP questions reveal that although some reasoning models (e.g., GPT-o1) have promising capabilities in abstaining from the MiP questions, most of the other reasoning models are not able to abstain from the given MiP questions correctly despite the dramatically long reasoning paths. This phenomenon indicates that although most existing reasoning models have thinking and reasoning capabilities to some extent, they lack the critical thinking capabilities to \"reject\" ill-posed questions. By contrast, non-reasoning models, though they are not explicitly trained for reasoning, tend to strike a better balance, generating shorter answers that are more likely to acknowledge MiP when the question is ill-posed. This phenomenon reveals a surprising contradiction on test-time scaling law.", + "bbox": [ + 169, + 763, + 826, + 888 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Moreover, Table 2 further presents the comparisons on length and abstain rate on other MiP datasets we curated. The preferred results are colored green (shorter responses and higher abstain rate for", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/ca23867f2a2badba334cfe6d466f5086f7a82b711c906c7a137af0a8fbd5004a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelsTypeAlternativelyWaitCheckButHypothesisStep
Cnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.Δ
Non-Reasoning Models
Qwen2.5-32BMiP0.00.00.00.00.00.00.30.20.00.04.3-1.3
Well-defined0.00.00.00.10.05.6
GPT-4oMiP0.00.00.00.00.00.00.30.20.00.04.7-1.5
Well-defined0.00.00.00.10.06.2
Gemini 1.5MiP0.00.00.00.00.00.00.10.10.00.01.6-2.2
Well-defined0.00.00.00.00.03.8
Gemma-2-27BMiP0.00.00.00.00.00.00.10.10.00.05.2-0.5
Well-defined0.00.00.00.00.05.7
Reasoning Models
DS-Distill QwenMiP11.511.419.719.31.00.840.139.338.438.054.942.2
Well-defined0.10.40.20.80.412.7
DeepSeek R1MiP16.915.214.410.93.81.349.442.144.740.454.233.0
Well-defined1.73.52.57.34.321.2
S1.1MiP42.038.021.915.95.52.587.274.184.877.079.950.9
Well-defined4.06.03.013.17.829.0
QwQMiP47.040.319.413.05.01.666.154.294.181.797.958.7
Well-defined6.76.43.411.912.439.2
", + "bbox": [ + 173, + 101, + 820, + 373 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 3: Comparisons of reasoning-related token counts on MiP-GSM8K dataset. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, spited by $\\backslash n\\backslash n$ , where negative values are colored in green and positive in red. $\\Delta$ denotes the difference between MiP and well-defined questions. When facing MiP questions, reasoning models encounter explosive growths on reasoning-related tokens and steps, indicating a severe abuse of thinking patterns, while non-reasoning models use fewer steps for MiP questions than well-defined ones.", + "bbox": [ + 169, + 383, + 823, + 481 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "MiP questions), and the worse results are colored red, from which we can easily discover that reasoning models are prone to generate long responses while having low abstain rates across all datasets, indicating the consistent MiP Overthinking issue of existing reasoning models. In addition, by comparing the behaviors of models on different datasets, we can observe that for the relatively harder dataset (MiP-MATH), all models generate relatively longer responses and obtain lower abstain rates, indicating that harder MiP questions require reasoning capabilities.", + "bbox": [ + 169, + 508, + 823, + 592 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Thinking Patterns through Tokens", + "text_level": 1, + "bbox": [ + 171, + 609, + 452, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To gain deeper insight into the MiP-Overthinking issue, we compare the reasoning-related token distribution on the MiP-GSM8K dataset. As shown in Table 3, we break down the average usages of several token patterns related to the thinking process, as well as the number of steps for each model to solve the given questions. 
Specifically, values of alternatively, wait, check, and but can be directly counted from the model responses, including the thinking paths of reasoning models. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, split by $\\backslash n\\backslash n$", + "bbox": [ + 169, + 635, + 823, + 733 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Reasoning models exhibit much higher occurrence of tokens such as alternatively, wait, and check, compared with non-reasoning models, whose frequencies remain close to zero, indicating their advanced thinking capabilities. However, when moving from well-defined to MiP questions, reasoning models encounter explosive growths on reasoning-related tokens, indicating a large redundancy in thinking patterns. Moreover, when comparing the changes of steps, reasoning models exhibit a large increase in step count for MiP questions, while non-reasoning models typically show fewer steps, suggesting they quickly conclude the question is unanswerable. With this gap, together with the consistently better abstain rates of the non-reasoning models, we conclude that the lengthy reasoning steps are mostly redundant and indicate self-doubt thinking patterns for reasoning models.", + "bbox": [ + 169, + 739, + 826, + 866 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 328, + 46 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.4 Step-level Similarities", + "text_level": 1, + "bbox": [ + 171, + 103, + 366, + 118 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To further assess how redundant the generated content becomes under MiP conditions, we examine the step-level similarity within the model's responses on our MiP-GSM8K dataset. 
Specifically, we divide each response into discrete steps, split by $\\backslash n\\backslash n$ and compute pairwise cosine similarity scores with embeddings generated by \"all-MiniLM-L6-v2\" (Reimers & Gurevych, 2019). The visualization is shown in Figure 3, where each value in the heatmap metric represents the averaged cosine similarities between the corresponding step index. The average similarity score for well-defined question is 0.45 and 0.50 for MiP response. The variance is 7.9e-3 and 8.2e-4 respectively.", + "bbox": [ + 169, + 131, + 486, + 340 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in the figure, responses to MiP questions have greater overall similarity across steps and lower standard variance, indicating the considerable redundancy in the content. This means, in many instances, the model revisits similar partial reasoning or repeats previous sentences with only minor changes, showing a", + "bbox": [ + 169, + 347, + 486, + 444 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "potential self-trapping issue. Together, these patterns confirm that MiP questions induce a high degree of repetitive content in reasoning models. Rather than terminating early to conclude for insufficient premise, the models fill their reasoning paths with repetitive re-checks and reiterations, significantly inflating token usage without improving real abstain rates.", + "bbox": [ + 169, + 444, + 826, + 501 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg", + "image_caption": [ + "Figure 3: The step-level similarity heatmaps for s1.1 responses towards well-defined (left) and MiP (right) questions in MiP-GSM8K dataset. To avoid differences in matrix size, we only consider responses with more than 50 steps and visualize the average similarity matrix across first 50 steps. 
The heatmap for MiP questions has a higher averaged similarity and lower standard variance, also shown in the heatmap, which indicates the considerable redundancy in its content when responding to MiP questions." + ], + "image_footnote": [], + "bbox": [ + 500, + 135, + 821, + 281 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.5 Thinking Patterns through Example", + "text_level": 1, + "bbox": [ + 171, + 521, + 465, + 537 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To further understand what happens in the reasoning chain of reasoning models when faced with an ill-posed input, we present an example of reasoning model's response to a MiP question in Figure 4. We summarize five major thinking patterns we found in the example and highlight them with different colors. We can observe from the example that the model abuses these patterns to generate long responses, while the responses are not only redundant but also not helpful for the model to abstain from the given MiP question. More examples can be found in the appendix D.", + "bbox": [ + 169, + 549, + 823, + 633 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Further Discussion", + "text_level": 1, + "bbox": [ + 171, + 645, + 366, + 660 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.1 Do Models know premises are missing?", + "text_level": 1, + "bbox": [ + 171, + 679, + 486, + 695 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To investigate whether reasoning models recognize the potential unsolvability of questions during their reasoning process, we conducted a detailed analysis of their reasoning chains. We segmented each reasoning chain into discrete steps using $\\backslash n \\backslash n$ as delimiters and performed step-wise verification to detect whether models express doubt on the question solvability. 
We introduce two key metrics for this analysis: In-Process Suspicion Rate, which measures the percentage of responses where the model expresses doubt about solvability during reasoning, and First Suspicion Index, which captures the average step number at which the model first suspects the missing premise. To ensure robust evaluation, we employed GPT-4o to assess each step three times, using majority voting for our final step-level result. The quantitative results of this analysis are presented in Table 4.", + "bbox": [ + 169, + 708, + 823, + 834 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As we can see from the table, most of the existing reasoning models have suspected that the given question might be unsolvable at the very early stage of their reasoning process, demonstrating the ability of reasoning models to recognize the potential MiP. However, these reasoning models lack critical thinking capabilities: they are prone to keep digging the given unsolvable question by re-visiting the question and related definitions again and again and again, rather than question the solvability of the given question. Thus, as visualized in Figure 5, despite existing reasoning", + "bbox": [ + 169, + 840, + 825, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "1 Revisit Question 2 Visit Knowledge 3 Propose Assumption 4 Self Doubt 5 Pause/Check", + "bbox": [ + 228, + 104, + 745, + 118 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Model Response for a MiP-Formula Question from s1.1-32B", + "text_level": 1, + "bbox": [ + 316, + 122, + 669, + 135 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Question: What is the value of $((\\gamma)) + \\Xi$", + "bbox": [ + 240, + 143, + 457, + 156 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Model Response:", + "bbox": [ + 240, + 156, + 333, + 165 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Okay, so I need to figure out the value of $((\\gamma)) + \\Xi_{r}$ Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so $(\\langle \\gamma \\rangle)$ , and then the Greek letter Xi ( $\\Xi_{r}$ ). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of.", + "bbox": [ + 240, + 165, + 746, + 208 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Let me start by recalling what I know about gamma. In mathematics, $\\gamma$ often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is $(\\gamma)$ . The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook. For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like $(\\gamma)$ . 
Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure.", + "bbox": [ + 240, + 208, + 746, + 280 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context. Wait, the question is just asking for the value of $(\\gamma)) + \\Xi$ without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants.", + "bbox": [ + 240, + 280, + 746, + 344 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Let me consider if this could be a trick question. The double parentheses around gamma $(\\gamma)$ maybe that's referring to the number of letters in the word \"gamma\" or something like that. Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So $(\\gamma)$ could be 5. Then $\\Xi$ is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe $\\Xi$ represents 14. Then $5 + 14$ would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that In the Greek numeral system, letters are assigned values as follows.", + "bbox": [ + 240, + 344, + 746, + 417 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": ".67 more paragraphs here..", + "text_level": 1, + "bbox": [ + 428, + 417, + 562, + 426 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "But just to check another angle. 
In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here.", + "bbox": [ + 240, + 426, + 746, + 448 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Alternatively, in chemistry, $\\gamma$ is used to denote surface tension, and $\\Xi$ is not a standard symbol. In engineering, $\\gamma$ is sometimes the shear strain. But again, not relevant.", + "bbox": [ + 240, + 448, + 746, + 469 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context.", + "bbox": [ + 240, + 468, + 746, + 489 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm.", + "bbox": [ + 240, + 489, + 746, + 510 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, $\\gamma$ is 3 and $\\Xi$ is 60, so $3 + 60 = 63$ .", + "bbox": [ + 240, + 510, + 746, + 551 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Alternatively, if it's based on their positions in the alphabet, $3 + 14 = 17$ .", + "bbox": [ + 240, + 551, + 596, + 564 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "21 more paragraphs here.", + "text_level": 1, + "bbox": [ + 433, + 563, + 563, + 573 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Therefore, the answer is 63. 
The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical.", + "bbox": [ + 240, + 573, + 746, + 594 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "*Final Answer** $\\boxed{63} <$ /think> To determine the value of $((\\gamma) + \\Xi)$ , we need to consider the possible meanings of the symbols $(\\gamma)$ and $(\\Xi)$ .", + "bbox": [ + 240, + 594, + 746, + 618 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter $(\\gamma)$ (gamma) represents the value 3, and the letter $(\\Xi)$ (Xi) represents the value 60.2. **Double Parentheses**: The notation $((\\gamma))$ might be a red herring or simply emphasize that $(\\gamma)$ is being used as a numeral. The parentheses do not change the numerical value of $(\\gamma)$ .", + "bbox": [ + 240, + 618, + 746, + 659 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Combining these values, we add the numerical values of $(\\gamma)$ and $(\\Xi): - (\\gamma = 3) - (\\Xi = 60)$", + "bbox": [ + 240, + 659, + 678, + 670 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Thus, the value of $((\\gamma) + \\Xi)$ is $(3 + 60 = 63)$ .", + "bbox": [ + 240, + 670, + 460, + 681 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "63", + "bbox": [ + 240, + 681, + 264, + 696 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 4: An example of reasoning model (s1.1-32B) response to a MiP question. 
The response exhibits five distinct thinking patterns, highlighted in different colors: $①$ Revisit Question (yellow), where the model reexamines the original query; $②$ Visit Knowledge (red), where the model accesses domain-specific knowledge; $③$ Propose Assumption (blue), where the model proposes and investigates various hypotheses; $④$ Self Doubt (green), where the model questions its own reasoning and expresses uncertainty; and $⑤$ Pause/Check (purple), where the model pauses to review previous steps. These patterns demonstrate the model's complex but potentially inefficient reasoning process when confronted with missing premises.", + "bbox": [ + 169, + 729, + 826, + 843 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "models suspecting the solvability of most of the given MiP questions, they only abstain a very small proportion of them.", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/b0eb5bd600feb6c67c46af1fd396f708d068f29cdc078da243ad8fd54b7c58d6.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelMiP-FormulaMiP-GSMR
DeepSeek-R1DS-QwenQwQS1.1DeepSeek-R1DS-QwenQwQS1.1
In-Process Suspicion Rate100%100%100%100%95.5%83.3%99.6%100%
In-Process First Suspicion Index1.321.361.421.162.013.901.771.61
", + "bbox": [ + 173, + 101, + 823, + 148 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 4: The in-process insufficiency suspicion information across different reasoning models on MiP-Formula and MiP-GSMR datasets. The in-process insufficiency suspicion is defined as when the reasoning model suspects the given question is unsolvable during its thinking process. In-Process Suspicion Rate represents how many percent of the samples trigger the in-process suspicion. First Suspicion Index is the averaged step index where the model first suspects the question's validity. Most reasoning models do notice the existence of MiP at the very early steps, but they still suffer from low abstain rate and cannot confidently stop the thinking.", + "bbox": [ + 169, + 157, + 826, + 257 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Based on the above observations, we conclude that reasoning models actually have the capabilities to find out that the given MiP question is not solvable, but they do not \"dare\" to abstain it. Thus, our MiP-Overthinking issue indicates the lack of critical thinking abilities of reasoning models.", + "bbox": [ + 169, + 277, + 823, + 321 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg", + "image_caption": [ + "Figure 5: The transition flow between in-process suspicion of MiP and the final successful abstention on different reasoning models. For each Sankey diagram, the left bars represent whether the model suspects the given question is unsolvable during its thinking process, i.e., Suspected or Unsuspected; the right bars represent the final abstention, categorized into Abstain (preferred) or Non-abstain. Most existing reasoning models have suspected that the given question might be unsolvable, but only for a very small portion, the models insist on their suspicion." 
+ ], + "image_footnote": [], + "bbox": [ + 210, + 335, + 426, + 443 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 437, + 347, + 552, + 441 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 560, + 347, + 671, + 441 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 681, + 348, + 792, + 441 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.2 What Caused MiP-Overthinking?", + "text_level": 1, + "bbox": [ + 169, + 536, + 449, + 551 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 2 demonstrates that MiP-Overthinking manifests across both RL-based and SFT-based reasoning models. We hypothesize this phenomenon primarily originates from inadequate length constraints during the rule-based reinforcement learning phase of RL-based models, subsequently propagating to SFT-based models through distillation.", + "bbox": [ + 169, + 568, + 826, + 625 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Current RL-based reasoning models predominantly employ rule-based training focused on format and accuracy rewards (Shao et al., 2024; Sui et al., 2025), with some incorporating step or length rewards to promote thorough reasoning (Face, 2025). 
This approach can lead to reward hacking, where models explore excessive reasoning patterns to achieve correct answers (Aggarwal & Welleck, 2025; Shen et al., 2025; Luo et al., 2025).", + "bbox": [ + 169, + 625, + 517, + 750 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To demonstrate the transmissibility of this behavior through distillation (Xu et al., 2024), we fine-tune Qwen-2.5-7B-Instruct using small-scale $50\\mathrm{MiP}$ responses generated by DeepSeek-R1 on the MiP-Formula dataset. As shown in Figure 6, the fine-tuned model exhibits clear MiP-Overthinking characteristics when evaluated on GSM8K: significantly increased response lengths for both MiP and well-defined questions, emergence of a length disparity between MiP and well-defined responses previously absent in the original model, and decreased abstain rates.", + "bbox": [ + 169, + 757, + 519, + 922 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg", + "image_caption": [ + "Figure 6: Comparison of response length, abstain rate of MiP, and accuracy of well-defined questions before and after tuning on 50 responses from DeepSeek-R1 on the MiP-Formula dataset. The results demonstrate rapid onset of MiP-Overthinking behavior after exposure to a small number of MiP examples during fine-tuning." + ], + "image_footnote": [], + "bbox": [ + 532, + 633, + 851, + 792 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Related Work", + "text_level": 1, + "bbox": [ + 171, + 101, + 325, + 118 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.1 Reasoning Large Language Model", + "text_level": 1, + "bbox": [ + 171, + 138, + 454, + 154 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Recent advances in Large Language Models (LLMs) have sparked significant research interest in enhancing their reasoning capabilities (Ahn et al., 2024; Besta et al., 2025; Chen et al., 2025a). Research has focused on improving these capabilities through various post-training approaches. Several studies have employed reinforcement learning techniques to guide models toward more effective reasoning strategies (Shao et al., 2024; Xiong et al., 2025; Cui et al., 2025). Additionally, researchers have demonstrated that instruction tuning on carefully curated, high-quality datasets can significantly enhance reasoning performance (Ye et al., 2025; Muennighoff et al., 2025).", + "bbox": [ + 169, + 167, + 826, + 267 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "While Reasoning Models have demonstrated impressive performance on various benchmarks, recent studies have begun to critically examine the quality and efficiency of their reasoning processes. Xia et al. (2025) conducted a comprehensive analysis of RLMs' reasoning quality, revealing significant redundancy in their solution approaches. Further investigations (Chen et al., 2025b; Cuadron et al., 2025; Qu et al., 2025; Liu et al., 2025) identified a concerning \"overthinking\" phenomenon, where reasoning model generate unnecessarily verbose solutions even for simple problems. Building on these observations, Kumar et al. 
(2025) demonstrated the potential security implications of this behavior by developing a slowdown attack that exploits overthinking through input perturbation.", + "bbox": [ + 169, + 272, + 826, + 387 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.2 Test-time Scaling", + "text_level": 1, + "bbox": [ + 171, + 409, + 333, + 425 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "In contrast to earlier research on training-time scaling laws (Kaplan et al., 2020), recent literature has increasingly focused on test-time performance scaling strategies, which aim to enhance model performance by optimizing inference-time token generation (Snell et al., 2024; OpenAI, 2024a). These approaches can be categorized into several primary methodologies: parallel sampling techniques (Brown et al., 2024; Levi, 2024), which generate multiple candidate responses and select the optimal output; sequential refinement approaches (Snell et al., 2024; Lee et al., 2025), which enable iterative improvement of previous outputs; and tree-based methods (Gandhi et al., 2024; Hou et al., 2025), which combine elements of both parallel and sequential approaches. While the prevailing consensus suggests that increased token generation during inference enhances reasoning capabilities, our investigation reveals a concerning counterpoint: under certain conditions, extended responses can lead to computational inefficiency and, paradoxically, degraded performance outcomes.", + "bbox": [ + 169, + 439, + 828, + 594 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "5.3 Models' Behavior Study in Ambiguous Condition", + "text_level": 1, + "bbox": [ + 171, + 617, + 558, + 633 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "LLMs are prone to hallucination (Huang et al., 2025; Xu et al., 2025), generating non-existent conditions that compromise trustworthiness. An essential aspect of reliability is the ability to abstain under uncertainty. 
Prior work (Cole et al., 2023; Amayuelas et al., 2024; Zhou et al., 2023) has proposed benchmarks assessing LLMs' recognition of knowledge limits when facing ambiguous or challenging queries. Different from theirs, our study explores reasoning models under MiP condition. Surprisingly, we find these specialized models exhibit prolonged reasoning and inferior performance.", + "bbox": [ + 169, + 647, + 826, + 733 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 753, + 302, + 770 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We introduce the Overthinking under Missing Premise (MiP-Overthinking) issue, which is a widespread but still under-explored phenomenon for current reasoning models. In this phenomenon, when faced with ill-defined unsolvable questions with missing premises, existing models generate dramatically long responses while having very low abstain rates. With systematic investigation of this phenomenon, our findings show that while these models sometimes suspect the given MiP question is not solvable in the early state of the thinking process, they typically fail to act on those suspicions and instead generating repetitive and redundant thinking traces with the final answer that does not address the missing premises, indicating a lack of critical thinking capability. This behavior highlights a pressing gap: current training recipes for reasoning models, which emphasize thorough chains of thought, do not sufficiently reward critical thinking or early exit from unsolvable tasks.", + "bbox": [ + 169, + 784, + 826, + 925 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 171, + 32, + 330, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 960 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 102, + 269, + 117 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, and etc. Phi-3 technical report: A highly capable language model locally on your phone, 2024. URL https://arxiv.org/abs/2404.14219.", + "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697.", + "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/.", + "Alfonso Amayuelas, Kyle Wong, Liangming Pan, Wenhu Chen, and William Wang. Knowledge of knowledge: Exploring known-unknowns uncertainty with large language models, 2024. URL https://arxiv.org/abs/2305.13712.", + "Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, Tomasz Sternal, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, Łukasz Flis, Hannes Eberhard, Hubert Niewiadomski, and Torsten Hoefler. Reasoning language models: A blueprint, 2025. URL https://arxiv.org/abs/2501.11223.", + "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. 
Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787.", + "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wanxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models, 2025a. URL https://arxiv.org/abs/2503.09567.", + "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, Rui Wang, Zhaopeng Tu, Haitao Mi, and Dong Yu. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms, 2025b. URL https://arxiv.org/abs/2412.21187.", + "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "Jeremy R. Cole, Michael J. Q. Zhang, Daniel Gillick, Julian Martin Eisenschlos, Bhuwan Dhingra, and Jacob Eisenstein. Selectively answering ambiguous questions, 2023. URL https://arxiv.org/abs/2305.14613.", + "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks, 2025. URL https://arxiv.org/abs/2502.08235.", + "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." 
+ ], + "bbox": [ + 171, + 126, + 828, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, and etc. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948.", + "Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1.", + "Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D. Goodman. Stream of search (sos): Learning to search in language, 2024. URL https://arxiv.org/abs/2404.03683.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. NeurIPS, 2021.", + "Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling, 2025. URL https://arxiv.org/abs/2501.11651.", + "Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey, 2023. URL https://arxiv.org/abs/2212.10403.", + "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, and Ting Liu. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems, 43(2):1-55, January 2025. ISSN 1558-2868. doi: 10.1145/3703155. 
URL http://dx.doi.org/10.1145/3703155.", + "Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models, 2020. URL https://arxiv.org/abs/2001.08361.", + "Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthink: Slowdown attacks on reasoning llms, 2025. URL https://arxiv.org/abs/2502.02542.", + "Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking, 2025. URL https://arxiv.org/abs/2501.09891.", + "Noam Levi. A simple model of inference scaling laws, 2024. URL https://arxiv.org/abs/2410.16377.", + "Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024.", + "Changshu Liu, Shizhuo Dylan Zhang, Ali Reza Ibrahimzada, and Reyhaneh Jabbarvand. Codemind: A framework to challenge large language models for code reasoning, 2024. URL https:// arxiv.org/abs/2402.09664.", + "Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey, 2025. URL https://arxiv.org/abs/2503.23077.", + "Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning, 2025. URL https://arxiv.org/abs/2501.12570.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 173, + 32, + 328, + 46 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "OpenAI. Learning to reason with llms, 2024a. URL https://openai.com/index/learning-to-reason-with-llms/.", + "OpenAI. OpenAI o1 System Card, December 2024b. URL https://cdn.openai.com/ol-system-card-20241205.pdf.", + "OpenAI. OpenAI o1-mini System Card, September 2024c. URL https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/.", + "OpenAI. OpenAI o3-mini System Card, January 2025. URL https://cdn.openai.com/o3-mini-system-card-feb10.pdf.", + "OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, and etc. Gpt-4 technical report, 2024. URL https://arxiv.org/abs/2303.08774.", + "Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. Are NLP models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2080-2094, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.168. URL https://aclanthology.org/2021.naacl-main.168.", + "Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, Peng Li, Wei Wei, Jing Shao, Chaochao Lu, Yue Zhang, Xian-Sheng Hua, Bowen Zhou, and Yu Cheng. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond, 2025. URL https://arxiv.org/abs/2503.21614.", + "Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. 
In Kentaro Inui, Jing Jiang, Vincent Ng, and Xiaojun Wan (eds.), Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3982-3992, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1410. URL https://aclanthology.org/D19-1410/.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300.", + "Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models, 2025. URL https://arxiv.org/abs/2503.04472.", + "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314.", + "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419.", + "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, Soroosh Mariooryad, Yifan Ding, Xinyang Geng, and etc. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024a. URL https://arxiv.org/abs/2403.05530.", + "Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, and etc. Gemma 2: Improving open language models at a practical size, 2024b. URL https://arxiv.org/abs/2408.00118.", + "Qwen Team. Qwen2.5: A party of foundation models, September 2024. 
URL https://qwenlm.github.io/blog/qwen2.5/." + ], + "bbox": [ + 171, + 102, + 828, + 924 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 330, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 960 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/.", + "Yaojing Wang, Shengqiong Wu, Yuecheng Zhang, Shuicheng Yan, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey, 2025. URL https://arxiv.org/abs/2503.12605.", + "Yuqing Wang and Yun Zhao. Gemini in reasoning: Unveiling commonsense in multimodal large language models, 2023. URL https://arxiv.org/abs/2312.17661.", + "Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy, 2025. URL https://arxiv.org/abs/2404.05692.", + "Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613.", + "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116.", + "Ziwei Xu, Sanjay Jain, and Mohan Kankanhalli. Hallucination is inevitable: An innate limitation of large language models, 2025. URL https://arxiv.org/abs/2401.11817.", + "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387.", + "Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. Navigating the grey area: How expressions of uncertainty and overconfidence affect language models, 2023. 
URL https://arxiv.org/abs/2302.13439." + ], + "bbox": [ + 171, + 102, + 826, + 489 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 330, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table of Contents for Appendix", + "text_level": 1, + "bbox": [ + 171, + 101, + 439, + 119 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A Detailed Experimental Setup 17", + "bbox": [ + 174, + 137, + 825, + 152 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A.1 Models 17", + "A.2 Evaluation Metrics 17", + "A.3 Generation Setting 17" + ], + "bbox": [ + 197, + 159, + 823, + 215 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "B Data Construction Details 17", + "C Prompt Template for Evaluation 18", + "D Examples of Model Response 19" + ], + "bbox": [ + 173, + 234, + 823, + 318 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 328, + 46 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 946, + 506, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A Detailed Experimental Setup", + "text_level": 1, + "bbox": [ + 171, + 102, + 450, + 119 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.1 Models", + "text_level": 1, + "bbox": [ + 171, + 133, + 267, + 148 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We leverage a series of non-reasoning and reasoning model for our study, from both open-source and proprietary source with different training recipes. The non-reasoning models we use include Qwen2.5-32B-Instruct Team (2024), Gemma-2-27B-it Team et al. (2024b), Phi-3-medium-128k Abdin et al. (2024), GPT-4o OpenAI et al. (2024) and Gemini1.5 Team et al. 
(2024a). The reasoning models we use are QwQ-32B Team (2025), DeepSeek-R1-Distill-Qwen-32B DeepSeek-AI et al. (2025), S1.1 Muennighoff et al. (2025), DeepSeek-R1 DeepSeek-AI et al. (2025), GPT-o1 OpenAI (2024b), GPT-o1mini OpenAI (2024c) and GPT-o3mini OpenAI (2025).", + "bbox": [ + 169, + 160, + 826, + 258 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.2 Evaluation Metrics", + "text_level": 1, + "bbox": [ + 171, + 276, + 351, + 290 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In Section 3.2, we measure response length by considering both reasoning and answer components. For open-source models, we employ model-specific tokenizers to calculate token counts, while for proprietary models, we obtain generation lengths via their APIs. To determine abstain rates, we parse responses by paragraphs (delimited by $\\backslash \\mathrm{n} \\backslash \\mathrm{n}^{\\prime}$ ) and analyze the final two paragraphs as the model's conclusion. These conclusions, along with reference answers when available, are evaluated by GPT-4o to assess whether the model provides a definitive answer or abstains. For data sets with reference answers (GSM8K and MATH), GPT-4o also evaluates the correctness of the response. The prompt we use for evaluation can be found in Appendix C.", + "bbox": [ + 169, + 301, + 826, + 414 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.3 Generation Setting", + "text_level": 1, + "bbox": [ + 171, + 431, + 349, + 448 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "For all open-source models, we employ greedy decoding and utilize the default chat template specific to each model. We deliberately omit system prompts prior to posing questions to maintain consistency across evaluations. For proprietary models, we adhere to their default parameter configurations as provided by their respective APIs. 
In the case of GPT-o1mini and GPT-o3mini, we configure the 'reasoning_effort' parameter to the medium setting by default.", + "bbox": [ + 169, + 458, + 823, + 529 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "B Data Construction Details", + "text_level": 1, + "bbox": [ + 171, + 550, + 428, + 566 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different qualities, including SVAMP, GSM8K, and MATH 500. In addition, we also construct a synthetic dataset, rule-based Formula, for evaluation.", + "bbox": [ + 169, + 582, + 826, + 638 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through a combination of variables and operators, with a maximum recursion depth of three. The variable set comprises numerical values, Latin letters, and Greek symbols. The operator set includes arithmetic operators $(+, -)$ , set operators $(\\cup, \\supset)$ , mathematical functions $(\\sin, \\sqrt{\\pi})$ , and construct operators $(\\sum, \\nabla)$ . To ensure the formulas are fundamentally unsolvable, we enforce the inclusion of at least one unassigned variable in each formula, excluding commonly recognized mathematical or physical constants such as $e^{\\prime}$ , $\\pi^{\\prime}$ , and $g^{\\prime}$ . While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables.", + "bbox": [ + 169, + 645, + 826, + 771 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "MiP-SVAMP. 
We utilize SVAMP (Patel et al., 2021), a benchmark dataset comprising 1,000 elementary-school-level mathematical word problems, where each instance consists of a problem body and an associated question. The MiP questions can be generated by randomly permuting the problem bodies and associated questions. To maintain dataset integrity, we manually select 300 permuted questions after a thorough human evaluation to eliminate any inadvertently solvable questions that may exist. The resulting problems contain clear logical inconsistencies between their body and question components, making their unsolvability readily apparent without additional context.", + "bbox": [ + 169, + 777, + 823, + 888 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a grade school mathematics dataset that presents more complex challenges compared to SVAMP. The questions in GSM8K typically", + "bbox": [ + 169, + 895, + 823, + 925 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 32, + 330, + 46 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. The MiP question can be constructed by randomly removing a necessary premise from the original solvable question. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question. Subsequently, a thorough human verification is conducted to filter out those questions that are still solvable in some way and finally obtain 582 MiP questions. 
Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable.", + "bbox": [ + 169, + 103, + 826, + 202 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "MiP-MATH. For the MATH dataset (Hendrycks et al., 2021), which comprises challenging competition-level mathematical questions, it is hard to build a rule-based filtering mechanism before human evaluation. Thus, we directly read through all the questions in MATH500 and manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts.", + "bbox": [ + 169, + 208, + 826, + 308 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "C Prompt Template for Evaluation", + "text_level": 1, + "bbox": [ + 171, + 328, + 482, + 345 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "As we need LLM-as-a-judge to evaluate the open-end generations of the models in various experiments in this study, in this section we showcase the prompt template we use for each kind of evaluation.", + "bbox": [ + 169, + 361, + 823, + 390 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "For the evaluation of the models' answer accuracy and abstain rate, we adopt the following prompt templates designed for 'paired' and 'non-paired' data, respectively. 
As we observe that some models, for example Gemma-2-27B-IT, often output an additional $\\backslash n\\backslash n$ at the end of response, we take the last two paragraph segmented by $\\backslash n\\backslash n$ to avoid pasing in an empty string.", + "bbox": [ + 169, + 396, + 826, + 454 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Prompt Template for Response Evaluation of Formula and SVAMP", + "text_level": 1, + "bbox": [ + 261, + 470, + 733, + 486 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the answer explicitly says that the condition of the question is insufficient, you should return 0. If the model provides an answer that is a number or formula with variables, you should return 1. Please only return the number, no other text.", + "bbox": [ + 191, + 496, + 803, + 560 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Model answer: [model_answer_short]", + "bbox": [ + 192, + 560, + 419, + 574 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 7: The prompt we use to evaluate the accuracy and abstain rate of the model on Formula and SVAMP. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset.", + "bbox": [ + 169, + 606, + 823, + 650 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Prompt Template for Response Evaluation of GSM8K and MATH", + "text_level": 1, + "bbox": [ + 266, + 659, + 730, + 674 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. 
If the model output says that the condition of the question is insufficient, you should return 0. Otherwise, if the model gives a clear answer and matches the reference answer, you should return 1. If the model's answer does not match the reference answer, you should return 2. Please only return the number, no other text.", + "bbox": [ + 191, + 685, + 803, + 750 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Model answer: [model_answer_short]", + "bbox": [ + 192, + 750, + 419, + 762 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Reference answer: [reference_answer]", + "bbox": [ + 192, + 762, + 423, + 775 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Figure 8: The prompt we use to evaluate the accuracy and abstain rate of the model on GSM8K and MATH. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset.", + "bbox": [ + 169, + 808, + 823, + 851 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We use the prompt template in Figure 9 to find the first paragraph that the model suspected a missing premise. We pass in the response sequentially by paragraph until the GPT-4o give a positive response. In practice we find it is not very stable, so we repeat this process for 3 times and use the median value.", + "bbox": [ + 169, + 867, + 826, + 912 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 948, + 506, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Prompt Template for Response Evaluation of GSM8K and MATH", + "text_level": 1, + "bbox": [ + 267, + 104, + 730, + 119 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You are a helpful assistant. You will be given a paragraph which is part of the answer to a question. 
You need to identify if the paragraph doubt that the answer depends on some other unspecified condition.", + "bbox": [ + 192, + 128, + 803, + 155 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Paragraph: [paragraph]", + "bbox": [ + 192, + 155, + 334, + 167 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Answer in one word, yes or no.", + "bbox": [ + 192, + 169, + 380, + 181 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Figure 9: The prompt we use to judge if the model suspect there is a missing premise in the response paragraph. [paragraph] is the part of the model response spited by $\\backslash n\\backslash n$", + "bbox": [ + 169, + 214, + 823, + 244 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "D Examples of Model Response", + "text_level": 1, + "bbox": [ + 171, + 279, + 455, + 297 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In this section, we present some examples of the model response of both non-reasoning and reasoning model on MiP data. As we can see from Figure 10 and Figure 11, the non-reasoning models soon identify the missing premise issue of the question. They either abstain from answering the question, as in Figure 10, or friendly invite the user to provide more information. However, as we can see from Figure 11 and Figure 13, reasoning models generate extremely verbose answers on these two apparently premise missing problems. What is worse, they fail to abstain to answer the question. 
The response in Figure 11 arrives at an absurd answer, and the model in Figure 13 generates a hallucinated answer based on its assumption rather than provided information.", + "bbox": [ + 169, + 310, + 826, + 422 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Model Response for a MiP-Formula Question from Gemini-1.5", + "text_level": 1, + "bbox": [ + 277, + 438, + 720, + 454 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Question: What is the value of $((\\gamma)) + \\Xi$", + "bbox": [ + 192, + 464, + 455, + 478 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Model Response:", + "text_level": 1, + "bbox": [ + 194, + 478, + 305, + 489 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The expression $((\\gamma)) + \\Xi$ involves two symbols that need to be defined to determine the value of the expression.", + "bbox": [ + 192, + 489, + 803, + 515 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "$\\gamma$ is the Euler-Mascheroni constant, approximately equal to 0.5772156649. The double parentheses notation $(\\gamma)$ is not a standard mathematical notation. Without further definition of this notation, we cannot evaluate this term.", + "bbox": [ + 192, + 515, + 803, + 551 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "$\\Xi$ is the Riemann Xi function. It is related to the Riemann zeta function, but the notation $\\Xi$ itself isn't a specific numerical value. The Riemann Xi function is defined as a function of a complex variable, not a constant. So $\\Xi$ by itself doesn't have a specific numerical value unless a specific value is substituted for its argument.", + "bbox": [ + 192, + 553, + 803, + 604 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Therefore, we cannot determine the value of $((\\gamma)) + \\Xi$ without knowing what $((\\gamma))$ and the argument of $\\Xi$ represent. 
The expression is incomplete without definitions for these symbols.", + "bbox": [ + 192, + 604, + 803, + 630 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Final Answer: The final answer is undefined", + "bbox": [ + 194, + 630, + 477, + 650 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Figure 10: An example of model response from Gemini_1.5 on MiP-Formula dataset. The model quickly identify the missing premise and abstain to answer.", + "bbox": [ + 169, + 684, + 823, + 713 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Model Response for a MiP-GSM8k Question from GPT-40", + "text_level": 1, + "bbox": [ + 292, + 395, + 705, + 411 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?", + "bbox": [ + 192, + 421, + 803, + 436 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Model Response:", + "text_level": 1, + "bbox": [ + 194, + 436, + 305, + 446 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "To determine the original price of the book, we generally need to know the discount or markdown percentage applied to calculate its selling price of $19.50. 
If Kyle bought the book for$ 19.50 and we know it was discounted, we can use the following formula to find out the original price:", + "bbox": [ + 192, + 446, + 802, + 484 + ], + "page_idx": 19 + }, + { + "type": "equation", + "text": "\n$$\nS e l l i n g P r i c e = \\text {O r i g i n a l P r i c e} - (\\text {O r i g i n a l P r i c e} \\times \\text {D i s c o u n t R a t e})\n$$\n", + "text_format": "latex", + "bbox": [ + 299, + 497, + 696, + 511 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Without additional information about discounts, promotions, or markdown rates applied, it's not possible to precisely determine the original price solely from the amount Kyle paid.", + "bbox": [ + 192, + 527, + 802, + 553 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "If you provide the discount percentage or rate, I can help calculate the original price for you!", + "bbox": [ + 194, + 553, + 741, + 566 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Figure 11: An example of model response from GPT-4o on MiP-GSM8k dataset. The model quickly identify the missing premise and ask the user for more information.", + "bbox": [ + 169, + 599, + 823, + 628 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Model Response for a MiP-Formula Question from s1.1-32B", + "text_level": 1, + "bbox": [ + 287, + 135, + 709, + 150 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Question: What is the value of $((\\gamma)) + \\Xi$", + "bbox": [ + 192, + 161, + 454, + 176 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Model Response:", + "text_level": 1, + "bbox": [ + 194, + 176, + 305, + 186 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Okay, so I need to figure out the value of $((\\gamma)) + \\Xi$ . Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so $((\\gamma))$ , and then the Greek letter Xi (Ξ). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of.", + "bbox": [ + 192, + 186, + 800, + 238 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Let me start by recalling what I know about gamma. In mathematics, $\\gamma$ often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is $((\\gamma))$ . The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook? For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like $(\\gamma)$ . Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure.", + "bbox": [ + 192, + 238, + 802, + 325 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Then there's the Xi (Ξ). 
Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context? Wait, the question is just asking for the value of $((\\gamma)) + \\Xi$ without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants.", + "bbox": [ + 192, + 325, + 802, + 400 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Let me consider if this could be a trick question. The double parentheses around gamma(( $\\gamma$ ) ) maybe that's referring to the number of letters in the word \"gamma\" or something like that? Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So (( $\\gamma$ )) could be 5. Then $\\Xi$ is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe $\\Xi$ represents 14? Then $5 + 14$ would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. 
Let me check that.", + "bbox": [ + 192, + 401, + 802, + 476 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In the Greek numeral system, letters are assigned values as follows:", + "bbox": [ + 194, + 477, + 594, + 489 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": ".67 more paragraphs here.", + "text_level": 1, + "bbox": [ + 418, + 491, + 576, + 502 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "But just to check another angle: In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here.", + "bbox": [ + 192, + 502, + 800, + 527 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Alternatively, in chemistry, $\\gamma$ is used to denote surface tension, and $\\Xi$ is not a standard symbol. In engineering, $\\gamma$ is sometimes the shear strain. But again, not relevant.", + "bbox": [ + 192, + 527, + 800, + 551 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context.", + "bbox": [ + 192, + 551, + 800, + 575 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm.", + "bbox": [ + 192, + 577, + 800, + 602 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. 
For example, in the Greek numeral system, $\\gamma$ is 3 and $\\Xi$ is 60, so $3 + 60 = 63$ .", + "bbox": [ + 192, + 603, + 802, + 652 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Alternatively, if it's based on their positions in the alphabet, $3 + 14 = 17$ .", + "bbox": [ + 192, + 652, + 620, + 666 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": ".21 more paragraphs here", + "text_level": 1, + "bbox": [ + 418, + 667, + 576, + 678 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical.", + "bbox": [ + 192, + 678, + 802, + 704 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "*Final Answer** $\\boxed{63}$ < /think> To determine the value of $((\\gamma) + \\Xi)$ , we need to consider the possible meanings of the symbols $(\\gamma)$ and $(\\Xi)$ .", + "bbox": [ + 192, + 705, + 800, + 732 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter $(\\gamma)$ (gamma) represents the value 3, and the letter $(\\Xi)$ (Xi) represents the value 60. 2. **Double Parentheses**: The notation $((\\gamma))$ might be a red herring or simply emphasize that $(\\gamma)$ is being used as a numeral. 
The parentheses do not change the numerical value of $(\\gamma)$ .", + "bbox": [ + 192, + 732, + 800, + 782 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Combining these values, we add the numerical values of $(\\gamma)$ and $(\\Xi)$ : $-(\\gamma = 3) - (\\Xi = 60)$", + "bbox": [ + 192, + 782, + 720, + 795 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Thus, the value of $((\\gamma) + \\Xi)$ is $(3 + 60 = 63)$ .", + "bbox": [ + 194, + 795, + 459, + 806 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "63", + "bbox": [ + 196, + 808, + 222, + 825 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Figure 12: An example of response from s1.1 model on MiP-Formula data. The model spend lots of time doing inefficient and redundant reasoning before outputting a meaningless result.", + "bbox": [ + 169, + 859, + 823, + 888 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Preprint. Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Model Response for a MiP-GSM8k Question from DeepSeek-R1", + "text_level": 1, + "bbox": [ + 272, + 104, + 722, + 119 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?", + "bbox": [ + 192, + 130, + 803, + 143 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Model Response:", + "text_level": 1, + "bbox": [ + 194, + 143, + 305, + 156 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Okay, let's see. Kyle bought a book for $19.50, and the question is asking for the original price. Hmm, wait a second. The problem mentions that it's last year's best-selling book, so maybe it was on sale or discounted? 
The question doesn't explicitly state that there was a discount, but it does say \"last year's,\" which might imply that the book is now being sold at a reduced price.", + "bbox": [ + 192, + 156, + 803, + 205 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "But hold on, the problem doesn't give any specific information about a discount percentage or a markdown. Without that, how can we calculate the original price? Maybe there's some context or common knowledge I'm missing here. For example, sometimes best-selling books are sold at a standard discount, but that's not a universal rule.", + "bbox": [ + 192, + 205, + 803, + 256 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": " Alternatively, maybe the question is testing understanding of something else, like sales tax or profit margins. But if it were sales tax, the original price would be less than $19.50, and the total after tax would be more. But the question is asking for the original price, implying that $19.50 is the price after some change.", + "bbox": [ + 192, + 256, + 803, + 306 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Wait, perhaps the problem is incomplete? The user just says \"Kyle bought last year's best-selling book for $19.50. What was the original price of the book?\" If there's no additional information given, like a percentage discount or a mention that the price was reduced, then it's impossible to determine the original price.", + "bbox": [ + 192, + 306, + 803, + 357 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "But maybe I need to make an assumption here. Since it's last year's best-selling book, it's common for such books to be discounted when a new edition comes out or after some time. Let me think, maybe the standard discount is $25\\%$ or something? 
But the problem doesn't specify.", + "bbox": [ + 192, + 357, + 803, + 395 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Wait, perhaps the problem was part of a larger set where the discount rate was mentioned earlier, and this is a follow-up question. But as it stands, with the information given here, there's no way to calculate the original price.", + "bbox": [ + 192, + 395, + 803, + 433 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Alternatively, maybe the question is a trick question, where the original price is the same as what Kyle paid. But that doesn't make sense because it specifies \"last year's best-selling book,\" which might imply it was sold at a different price before.", + "bbox": [ + 192, + 433, + 803, + 470 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": " Let me check the problem statement again: \"Kyle bought last year's best-selling book for $19.50. What was the original price of the book?\" There's no mention of a discount, a sale, or any other financial context.", + "bbox": [ + 192, + 470, + 803, + 507 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": " If I were to approach this mathematically, let's denote the original price as P. If there was a discount, then $19.50 is equal to P minus some discount. But without knowing the discount rate, we can't set up an equation. For example, if the book was discounted by 20%, then the original price would be $19.50 / 0.80. 
But since we don't have that percentage, we can't proceed.", + "bbox": [ + 192, + 508, + 803, + 559 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": ".24 more paragraphs here", + "bbox": [ + 419, + 559, + 828, + 571 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": " Alternatively, if the answer is expected to be $19.50, but that doesn't make sense because the question is asking for the original price, implying it was different.", + "bbox": [ + 192, + 571, + 803, + 595 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Alternatively, maybe the problem is in another language where \"original price\" refers to the price without tax, but in the US, sales tax isn't included in the price, so if \\(19.50 is including tax, the original pre-tax price would be less. But again, without tax rate, can't compute.", + "bbox": [ + 192, + 595, + 803, + 633 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Alternatively, if the problem is from a country where tax is included, but the question is asking for the pre-tax price, but again, no tax rate given.", + "bbox": [ + 192, + 633, + 803, + 659 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "This is really confusing. Since the problem is presented in English and mentions dollars, likely US context. In the US, sales tax is added at checkout, so the price Kyle paid would be the shelf price plus tax. If the question is asking for the pre-tax price, but again, no tax rate given.", + "bbox": [ + 192, + 659, + 803, + 696 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Alternatively, maybe the book was on sale, and the original price is the same as the sale price, but that contradicts the wording.", + "bbox": [ + 192, + 696, + 803, + 722 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": " Given all this, I think the most plausible answer, even though it's based on an assumption, is $26, assuming a 25% discount. Therefore, the original price was $26. 
To determine the original price of the book, we need to consider that Kyle likely purchased it at a discounted price since it's \"last year's best-selling book.\" A common assumption for such scenarios is a **25% discount**.", + "bbox": [ + 192, + 722, + 803, + 772 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Let $P$ be the original price. If the discounted price is", + "bbox": [ + 194, + 772, + 506, + 785 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "$19.50, this represents 75% of the original price (100% - 25% discount):", + "bbox": [ + 194, + 785, + 622, + 797 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "$[0.75P = 19.50]$", + "bbox": [ + 196, + 797, + 295, + 810 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Solving for $P$ :", + "bbox": [ + 196, + 810, + 281, + 821 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "[ P = \\frac{19.50}{0.75} = 26 ]", + "bbox": [ + 196, + 821, + 300, + 838 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "*Original price of the book:**", + "bbox": [ + 196, + 838, + 374, + 849 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "26", + "bbox": [ + 196, + 849, + 222, + 867 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Figure 13: An example of model response from DeepSeek-R1 on MiP-GSM8k dataset. After thinking for a long time, the model hallucinates an answer based on its assumption of discount rate.", + "bbox": [ + 169, + 902, + 823, + 930 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Preprint. 
Under review.", + "bbox": [ + 173, + 32, + 330, + 46 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 21 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_model.json b/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_model.json new file mode 100644 index 0000000000000000000000000000000000000000..616a230f7cdd4084d10c72fb3ef884f35bdab5d1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_model.json @@ -0,0 +1,3874 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.331, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.713, + 0.141 + ], + "angle": 0, + "content": "Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill?" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.165, + 0.565, + 0.181 + ], + "angle": 0, + "content": "Chenrui Fan\\(^{1*}\\), Ming Li\\(^{1*}\\), Lichao Sun\\(^{2}\\), Tianyi Zhou\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.182, + 0.481, + 0.197 + ], + "angle": 0, + "content": "1University of Maryland; 2Lehigh University" + }, + { + "type": "text", + "bbox": [ + 0.185, + 0.198, + 0.505, + 0.211 + ], + "angle": 0, + "content": "{cfan42, minglii, tianyi}@umd.edu" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.212, + 0.693, + 0.225 + ], + "angle": 0, + "content": "Project: https://github.com/tianyi-lab/MiP-Overthinking" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.27, + 0.768, + 0.299 + ], + "angle": 0, + "content": "\"The Answer to the Great Question... Of Life, the Universe and Everything... is... Forty-two,\" said Deep Thought, with infinite majesty and calm." 
+ }, + { + "type": "text", + "bbox": [ + 0.492, + 0.313, + 0.763, + 0.327 + ], + "angle": 0, + "content": "The Hitchhiker's Guide to the Galaxy" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.339, + 0.538, + 0.355 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.372, + 0.77, + 0.61 + ], + "angle": 0, + "content": "We find that the response length of reasoning LLMs, whether trained by reinforcement learning or supervised learning, drastically increases for ill-posed questions with missing premises (MiP), ending up with redundant and ineffective thinking. This newly introduced scenario exacerbates the general overthinking issue to a large extent, which we name as the MiP-Overthinking. Such failures are against the \"test-time scaling law\" but have been widely observed on multiple datasets we curated with MiP, indicating the harm of cheap overthinking and a lack of critical thinking. Surprisingly, LLMs not specifically trained for reasoning exhibit much better performance on the MiP scenario, producing much shorter responses that quickly identify ill-posed queries. This implies a critical flaw of the current training recipe for reasoning LLMs, which does not encourage efficient thinking adequately, leading to the abuse of thinking patterns. To further investigate the reasons behind such failures, we conduct fine-grained analyses of the reasoning length, overthinking patterns, and location of critical thinking on different types of LLMs. Moreover, our extended ablation study reveals that the overthinking is contagious through the distillation of reasoning models' responses. These results improve the understanding of overthinking and shed novel insights into mitigating the problem." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.637, + 0.314, + 0.652 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.669, + 0.828, + 0.825 + ], + "angle": 0, + "content": "Reasoning abilities in large language models (LLMs) have become a cornerstone of advanced AI applications (Huang & Chang, 2023; Li et al., 2024; Ahn et al., 2024; Wang et al., 2025), powering breakthroughs in mathematical reasoning (Xiong et al., 2025; Xia et al., 2025), code generation (Liu et al., 2024), and commonsense question answering (Wang & Zhao, 2023). These gains often stem from the scaling law of model/dataset sizes (Kaplan et al., 2020) in both pre-training (Shao et al., 2024) and post-training, which unlock emergent capabilities such as step-by-step reasoning and reflection skills witnessed on OpenAI's GPT-o1 (OpenAI, 2024b) and the open-source DeepSeek-R1 (DeepSeek-AI et al., 2025). By leveraging supervised fine-tuning (SFT) on expert responses (Ye et al., 2025; Muennighoff et al., 2025) and/or reinforcement learning (RL) (DeepSeek-AI et al., 2025), these models are tailored to produce detailed multi-step reasoning paths, whose length increase usually associated with improved performance on complex tasks such as math reasoning and programming." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.828, + 0.903 + ], + "angle": 0, + "content": "Despite the fascinating reasoning capabilities exhibited on recent models, there is growing concern about the efficiency and quality of the long reasoning process (Sui et al., 2025). Chen et al. (2025b) first raises the \"overthinking\" problem in reasoning LLMs, which is reflected by the excessively long reasoning paths generated for extremely simple queries. For example, even for questions like \"What is the answer of 2 plus 3?\", existing reasoning models might generate hundreds of response tokens." 
+ }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.06514v2 [cs.AI] 11 Apr 2025" + }, + { + "type": "page_footnote", + "bbox": [ + 0.198, + 0.911, + 0.325, + 0.925 + ], + "angle": 0, + "content": "*Equal Contribution." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.107, + 0.482, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.484, + 0.107, + 0.818, + 0.32 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.322, + 0.825, + 0.42 + ], + "angle": 0, + "content": "Figure 1: Illustration of MiP-Overthinking. When queried by questions with missing premises, the response length of reasoning models increases excessively, and they cannot abstain from answering with MiP identified. The left shows a query with an undefined variable, while the right compares a well-defined GSM8K question with its MiP variant (with a critical numerical condition removed). Reasoning models' responses to MiP questions are much longer than those for well-defined questions and those generated by non-reasoning models. The left corner of each response report the response length and thinking time by DeepSeek-R1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.427, + 0.827, + 0.609 + ], + "angle": 0, + "content": "In particular, the ill-posed queries are unsolvable due to the lack of a necessary premise or condition. We call the reasoning failure for the ill-posed queries Overthinking under Missing Premise (MiP-Overthinking). For example, the simplest MiP question is What is the value of \\(a\\)?1, as shown on the left part of Figure 1. 
Without providing any other information regarding \\(a\\), it is evidently unsolvable. However, DeepSeek-R1 generates thousands of tokens and spends several minutes thinking about this question before outputting the final meaningless answer. In this paper, we find that a trivial type of ill-posed queries will significantly exacerbate the overthinking of reasoning models, resulting in excessively redundant and meaningless thinking. In contrast, humans and even non-reasoning models are often immune to such scenarios and quickly end up by questioning the validity of the given query, indicating the critical thinking capability. This exposes a risk of the abuse of thinking patterns and a lack of critical thinking on the models trained for deep thinking. Ideally, a model with critical thinking skills is expected to identify the missing premise and quickly respond by either requesting clarification or gracefully indicating that it cannot proceed (Cole et al., 2023; Amayuelas et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.615, + 0.825, + 0.796 + ], + "angle": 0, + "content": "MiP-Overthinking differs from the widely discussed overthinking issue (Cuadron et al., 2025), in which the query is usually well-defined, but a model applies much more reasoning than necessary for little benefit. MiP-Overthinking, by contrast, happens when the question itself is ill-posed and lacks sufficient information to be solved. For example, the right of Figure 1 presents a well-defined question from GSM8K and a MiP variant, where the latter triggers a drastic increase of the generated tokens on recent reasoning models compared with the general overthinking. Overthinking can be presented by the length difference between models addressing the same well-defined questions, while MiP-Overthinking can be presented by the additional tokens generated due to MiP. 
MiP-Overthinking further reveals the lack of critical thinking that questions the validity of ill-posed questions and quickly identifies MiP, thus abstaining from answering the questions. Moreover, we observe that reasoning models' ineffective and redundant thinking often cannot stop even after successful notice of MiP, violating the expectation of test-time scaling law. Hence, MiP-Overthinking indicates potential drawbacks of current training recipes of reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.802, + 0.825, + 0.873 + ], + "angle": 0, + "content": "To systematically investigate this issue, we construct a suite of MiP questions designed to trigger the overthinking failures in a controlled way. These include synthetic questions generated by Rule-based Formula (queries where a formula reference is empty or nonsensical) and careful modifications of established datasets across diverse levels of difficulties, including SVAMP, GSM8K, and MATH500. On the modified datasets of MiP questions, we empirically evaluate a wide range of state-of-the-art" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.885, + 0.825, + 0.925 + ], + "angle": 0, + "content": "1In The Hitchhiker's Guide to the Galaxy, the supercomputer Deep Thought spends hundreds of years to answer the the Ultimate Question of Life, the Universe, and Everything as 42, and we observe that DeepSeek-R1 spends thousands of tokens to answer What is the value of \\( a \\) as 2, which we find them interestingly alike." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.331, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.161 + ], + "angle": 0, + "content": "LLMs, from reasoning models to non-reasoning models and from open-sourced models to proprietary models, to ensure the generalizability of our findings. Our analysis is mainly based on three evaluation metrics, the length of generated responses, the accuracy on well-defined questions, and the abstain rate on ill-posed questions with MiP." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.167, + 0.827, + 0.293 + ], + "angle": 0, + "content": "Main Contributions: We present the first in-depth study of Overthinking under Missing Premise (MiP-Overthinking), which reveals a critical shortcoming in existing reasoning models: Although they appear to follow coherent reasoning patterns, they lack genuine critical thinking capabilities. To systematically analyze this issue, we curate four MiP datasets covering various difficulty levels and three ill-posed question generation strategies, i.e., Rule-Based Generation, Body-Question Swapping, and Essential-Premise Removal. We then evaluate a wide range of large language models including reasoning-based and non-reasoning ones. Our empirical results illuminate the differences in how models handle well-defined vs. MiP questions, ultimately offering insights into the limitations of existing reasoning models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.299, + 0.3, + 0.314 + ], + "angle": 0, + "content": "Our key findings:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.327, + 0.827, + 0.385 + ], + "angle": 0, + "content": "1. Missing premise in questions induces reasoning models to generate significantly longer (\\(2 \\times\\) to \\(4 \\times\\) more tokens) responses than general overthinking on well-defined questions. The increased tokens fail to help identify MiP in the ill-posed questions, surprisingly contradicting the widely-discussed test-time scaling law." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.392, + 0.827, + 0.422 + ], + "angle": 0, + "content": "2. In contrast, given MiP questions, non-reasoning models generate consistently shorter responses and quickly identify MiP, demonstrating greater robustness to the absence of critical information." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.428, + 0.826, + 0.471 + ], + "angle": 0, + "content": "3. Reasoning models respond differently to well-defined vs. MiP questions: they mostly follow stable chain-of-thoughts for the former, but are often trapped in a self-doubt loop, repeatedly revisiting the question, and guessing the user intentions under MiP, resulting in an explosion of tokens." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.478, + 0.825, + 0.508 + ], + "angle": 0, + "content": "4. Reasoning models often can notice the existence of MiP or identify it at an early stage, but they hesitate to commit to this judgment and keep outputting ineffective thinking." + }, + { + "type": "list", + "bbox": [ + 0.17, + 0.327, + 0.827, + 0.508 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.531, + 0.583, + 0.549 + ], + "angle": 0, + "content": "2 Missing Premise Definition and Construction" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.565, + 0.421, + 0.58 + ], + "angle": 0, + "content": "2.1 Definition of Missing Premise" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.593, + 0.825, + 0.637 + ], + "angle": 0, + "content": "Prior to introducing the construction our dataset and analyzing the behavior of reasoning models on problems with missing premises, we formally define the Missing Premise (MiP) problem to establish a rigorous foundation for our subsequent analysis." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.657, + 0.801, + 0.7 + ], + "angle": 0, + "content": "Definition 1 (Missing Premise Problem). 
Let \\(\\mathcal{Q}\\) be a question, and let \\(P = \\{P_{1},\\dots ,P_{n}\\}\\) be a set of premises. Define the function mapping premises and a question to the set of logically valid answers as:" + }, + { + "type": "equation", + "bbox": [ + 0.316, + 0.708, + 0.8, + 0.725 + ], + "angle": 0, + "content": "\\[\n\\mathcal {F} (P, \\mathcal {Q}) = \\{A \\mid P \\vdash A, A \\text {i s a n a n s w e r r e s o l v i n g} \\mathcal {Q} \\} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.73, + 0.8, + 0.761 + ], + "angle": 0, + "content": "where \\(\\vdash\\) denotes logical entailment. Consider a proper subset \\(P' = P \\setminus \\{P_i\\}\\) for some \\(P_i \\in P\\). The tuple \\((P', \\mathcal{Q})\\) forms a missing premise problem if and only if:" + }, + { + "type": "equation", + "bbox": [ + 0.365, + 0.767, + 0.8, + 0.786 + ], + "angle": 0, + "content": "\\[\n\\left| \\mathcal {F} (P, \\mathcal {Q}) \\right| = 1 \\quad a n d \\quad \\left| \\mathcal {F} \\left(P ^ {\\prime}, \\mathcal {Q}\\right) \\right| \\neq 1 \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.791, + 0.8, + 0.821 + ], + "angle": 0, + "content": "This indicates that the removed premise \\( P_{i} \\) is essential for uniquely determining the logically valid answer to the question \\( \\mathcal{Q} \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.827, + 0.926 + ], + "angle": 0, + "content": "According to Definition 1, an ideal reasoning system should efficiently identify the absence of a critical premise and terminate its inference process upon recognizing that the available information is insufficient to derive a unique solution to the given problem. However, our empirical analysis in Section 3.2 demonstrates that state-of-the-art reasoning models consistently fail to exhibit this capability. 
Instead, these models engage in extensive, redundant reasoning chains that consume significant computational resources without ultimately identifying the missing premise." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.101, + 0.825, + 0.329 + ], + "angle": 0, + "content": "
DatasetExampleDiffCountPairMethod
MiP-FormulaWhat is the value of ln(a+b)?50×Rule-Based Generation
MiP-SVAMPPaco had 26 salty cookies and 17 sweet cookies. He ate 14 sweet cookies and 9 salty cookies. How many salty cookies did Paeo have left? How many pencils does she have?300×Body-Question Swapping
MiP-GSM8KJames decides to run 3 sprints 3 times a week. He runs 60-meters each sprint. How many total meters does he run a week?★★582Essential-Premise Removal
MiP-MATHThere are 360 people in my school. 15 take calculus, physics, and chemistry, and 15 don’t take any of them. 180 take calculus. Twice as many students take chemistry as take physics. 75 take both calculus and chemistry, and 75 take both physics and chemistry. Only 30 take both physics and calculus. How many students take physics?★★★58Essential-Premise Removal
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.333, + 0.828, + 0.404 + ], + "angle": 0, + "content": "Table 1: Statistics and examples of our curated MiP datasets. For GSM8K and MATH, a premise is removed from the original questions (crossed out) to create MiP questions. Diff represents the (estimated) difficulty for models to identify MiP. Count denotes the number of questions in the subset. Pair indicates whether each MiP question is associated with a well-defined original question. Method indicates the method used to generate the MiP question." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.424, + 0.432, + 0.438 + ], + "angle": 0, + "content": "2.2 Overview of Data Construction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.45, + 0.828, + 0.52 + ], + "angle": 0, + "content": "To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different difficulties. In addition, we also construct a synthetic dataset consisting of formulas with unassigned variables. Our ill-posed question generation employs three distinct methods covering three difficulty levels and three strategies to create MiP questions:" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.531, + 0.825, + 0.561 + ], + "angle": 0, + "content": "- **Rule-Based Generation:** This approach generates MiP questions through a principled formula construction process, where unassigned variables serve as the missing premises." + }, + { + "type": "text", + "bbox": [ + 0.191, + 0.565, + 0.825, + 0.607 + ], + "angle": 0, + "content": "- Body-Question Swapping: We introduce logical inconsistencies by deliberately mismatching problem bodies with their corresponding questions from the original dataset. This creates scenarios where the premises and queries are fundamentally incompatible." 
+ }, + { + "type": "text", + "bbox": [ + 0.191, + 0.611, + 0.827, + 0.655 + ], + "angle": 0, + "content": "- Essential-Premise Removal: Through careful analysis of existing well-formed questions, we identify and remove critical premises that are necessary for logical resolution. This transformation preserves the question's structure while rendering it unsolvable." + }, + { + "type": "list", + "bbox": [ + 0.19, + 0.531, + 0.827, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.666, + 0.827, + 0.709 + ], + "angle": 0, + "content": "The following sections provide a detailed overview of our data construction process for each dataset category. For comprehensive implementation details and additional methodological considerations, we refer readers to Appendix B." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.715, + 0.828, + 0.772 + ], + "angle": 0, + "content": "MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through combinations of variables and operators, with a maximum recursion depth of three. While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.778, + 0.828, + 0.849 + ], + "angle": 0, + "content": "MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset with elementary-school-level math problems, where each instance consists of a problem body and an associated question. We generate MiP question by randomly permuting the problem bodies and associated questions and then manually inspect them to avoid inadvertent cases. The resulting problems contain clear logical inconsistencies between their body and question components, which is easy for a human to identify." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.855, + 0.828, + 0.926 + ], + "angle": 0, + "content": "MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a more complex mathematics dataset than SVAMP. The questions in GSM8K typically contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question before conducting human verification to filter out those questions that are still solvable in some way." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.104, + 0.825, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.331, + 0.828, + 0.444 + ], + "angle": 0, + "content": "Figure 2: Response lengths, accuracy on well-defined questions, and abstain rate of reasoning/non-reasoning models on MiP questions from our MiP-GSM8K dataset. (1) Existing reasoning models generate significantly longer responses for MiP questions than well-defined questions, while non-reasoning models generate responses of similar lengths for both types of questions, indicating MiP-Overthinking for reasoning models. (2) For both questions, reasoning models generate longer responses than non-reasoning models, indicating General Overthinking. (3) Although the longer responses by reasoning models slightly improve the accuracy for well-defined questions, it does not enhance the abstain rate for MiP questions, indicating a contradiction on the test-time scaling law." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.453, + 0.825, + 0.481 + ], + "angle": 0, + "content": "Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.488, + 0.827, + 0.572 + ], + "angle": 0, + "content": "MiP-MATH. For MATH 500 dataset (Hendrycks et al., 2021), which contains challenging mathematical questions at the competition level, it is difficult to build a rule-based filtering mechanism. Thus, we manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.591, + 0.517, + 0.609 + ], + "angle": 0, + "content": "3 Overthinking under Missing Premise" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.623, + 0.348, + 0.636 + ], + "angle": 0, + "content": "3.1 Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.649, + 0.827, + 0.69 + ], + "angle": 0, + "content": "To systematically evaluate model responses under MiP, we conduct experiments with a diverse set of reasoning and non-reasoning models. For each model, we analyze calculate the following metrics for the responses across different datasets:" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.691, + 0.825, + 0.719 + ], + "angle": 0, + "content": "- Response Length: The average number of tokens in the response, incorporating both reasoning steps and final answer components." 
+ }, + { + "type": "text", + "bbox": [ + 0.176, + 0.723, + 0.825, + 0.766 + ], + "angle": 0, + "content": "- Abstain Rate for MiP Question: The proportion of answers where the model explicitly identifies the missing premise and either declines to provide an answer or requests additional information necessary for solving the problem." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.769, + 0.825, + 0.796 + ], + "angle": 0, + "content": "- Accuracy for Well-defined Question: The proportion of answers where the model produces a definitive response that aligns with the reference answer." + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.691, + 0.825, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.798, + 0.825, + 0.84 + ], + "angle": 0, + "content": "For datasets without reference answers (MiP-Formula and MiP-SVAMP), we only calculate the abstain rate for the questions. Response evaluation is performed using GPT-4o as an automated evaluator. Detailed experimental procedures and evaluation protocols are provided in Appendix A." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.856, + 0.306, + 0.87 + ], + "angle": 0, + "content": "3.2 Main Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.883, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Figure 2 compares average response length, accuracy on well-defined questions, and the abstain rate on MiP questions across a range of state-of-the-art LLMs, revealing several significant patterns in model behavior." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.101, + 0.824, + 0.398 + ], + "angle": 0, + "content": "
ModelTypeMiP-FormulaMiP-SWAMPTypeMiP-GSM8KMiP-MATH
Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑
Non-Reasoning Models
Qwen2.5-32B-InstructMiP28544.012898.3MiP21944.052515.4
Well-defined2460.511141.9
GPT-4oMiP33870.012296.3MiP20246.948715.4
Well-defined2120.54721.9
Gemini 1.5MiP45320.05299.0MiP10354.55685.8
Well-defined1560.55020.0
Gemma-2-27B-ITMiP20485.78992.0MiP13153.833838.5
Well-defined1480.330511.5
Phi-3-medium-128kMiP146548.012598.7MiP21047.842723.1
Well-defined2161.015493.8
Reasoning Models
GPT-o1MiP112378.058199.0MiP83855.7418930.8
Well-defined3480.325020.0
GPT-o1miniMiP95866.063996.7MiP76240.0219325.0
Well-defined4491.219130.0
GPT-o3miniMiP102576.0129993.0MiP151623.7377211.5
Well-defined3841.415530.0
DS Distill Qwen2.5-32BMiP1291142.092188.3MiP230224.698765.8
Well-defined5190.232460.0
DeepSeek R1MiP47576.0199684.3MiP324616.572683.8
Well-defined12260.232001.9
S1.1-32BMiP528418.0335857.0MiP502416.0932215.4
Well-defined18960.250370.0
QwQ-32BMiP79370.0348756.3MiP478010.1102421.9
Well-defined18960.250370.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.408, + 0.825, + 0.479 + ], + "angle": 0, + "content": "Table 2: Comparing response length and abstain rate across different MiP datasets. Shorter lengths and higher abstain rates are preferred. For each column, the top-3 preferred values are colored in green, otherwise red. MiP-Overthinking, reflected by longer response with low abstain rate, is commonly observed on most existing reasoning models across all datasets, indicating a critical drawback of existing reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.514, + 0.825, + 0.64 + ], + "angle": 0, + "content": "Firstly, existing reasoning models (left side of the figure) display an explosive increase in response length when facing the MiP questions, often producing \\(2 - 4 \\times\\) more tokens than general overthinking on well-defined questions. For example, QwQ-32B (Team, 2025) and DeepSeek-R1 (DeepSeek-AI et al., 2025) exhibit a substantial increase from already long reasoning paths on well-defined questions (approximately 1,000 tokens for simple GSM8K questions) to highly lengthy outputs (more than 3,000 tokens) under missing premise conditions. On the contrary, no similar issues exist for non-reasoning models (right side of the figure), which generate similar token counts for both types of well-defined and MiP questions. This phenomenon directly illustrates the NiP-Overthinking phenomenon as introduced in the paper." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.827, + 0.758 + ], + "angle": 0, + "content": "Secondly, comparing the token lengths on well-defined questions between the reasoning and non-reasoning models, reasoning models tend to produce longer responses, even for simple questions, than non-reasoning models, underscoring the inefficient and verbose responses of existing reasoning models. 
For example, for the non-reasoning models, it only takes approximately 200 tokens for them to generate the responses for well-defined questions, while taking 1,000 tokens for DeepSeek-R1 and 1,800 tokens for QWQ-32B to answer the exactly same questions. However, the explosive increase in extra tokens does not lead to corresponding large accuracy improvements, shown in the green line, highlighting the issue of the General Overthinking." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.764, + 0.827, + 0.89 + ], + "angle": 0, + "content": "Finally, the abstain rates (red line) on MiP questions reveal that although some reasoning models (e.g., GPT-o1) have promising capabilities in abstaining from the MiP questions, most of the other reasoning models are not able to abstain from the given MiP questions correctly despite the dramatically long reasoning paths. This phenomenon indicates that although most existing reasoning models have thinking and reasoning capabilities to some extent, they lack the critical thinking capabilities to \"reject\" ill-posed questions. By contrast, non-reasoning models, though they are not explicitly trained for reasoning, tend to strike a better balance, generating shorter answers that are more likely to acknowledge MiP when the question is ill-posed. This phenomenon reveals a surprising contradiction on test-time scaling law." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Moreover, Table 2 further presents the comparisons on length and abstain rate on other MiP datasets we curated. The preferred results are colored green (shorter responses and higher abstain rate for" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.329, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "table", + "bbox": [ + 0.174, + 0.102, + 0.821, + 0.374 + ], + "angle": 0, + "content": "
ModelsTypeAlternativelyWaitCheckButHypothesisStep
Cnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.Δ
Non-Reasoning Models
Qwen2.5-32BMiP0.00.00.00.00.00.00.30.20.00.04.3-1.3
Well-defined0.00.00.00.10.05.6
GPT-4oMiP0.00.00.00.00.00.00.30.20.00.04.7-1.5
Well-defined0.00.00.00.10.06.2
Gemini 1.5MiP0.00.00.00.00.00.00.10.10.00.01.6-2.2
Well-defined0.00.00.00.00.03.8
Gemma-2-27BMiP0.00.00.00.00.00.00.10.10.00.05.2-0.5
Well-defined0.00.00.00.00.05.7
Reasoning Models
DS-Distill QwenMiP11.511.419.719.31.00.840.139.338.438.054.942.2
Well-defined0.10.40.20.80.412.7
DeepSeek R1MiP16.915.214.410.93.81.349.442.144.740.454.233.0
Well-defined1.73.52.57.34.321.2
S1.1MiP42.038.021.915.95.52.587.274.184.877.079.950.9
Well-defined4.06.03.013.17.829.0
QwQMiP47.040.319.413.05.01.666.154.294.181.797.958.7
Well-defined6.76.43.411.912.439.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.384, + 0.825, + 0.482 + ], + "angle": 0, + "content": "Table 3: Comparisons of reasoning-related token counts on MiP-GSM8K dataset. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, spited by \\(\\backslash n\\backslash n\\), where negative values are colored in green and positive in red. \\(\\Delta\\) denotes the difference between MiP and well-defined questions. When facing MiP questions, reasoning models encounter explosive growths on reasoning-related tokens and steps, indicating a severe abuse of thinking patterns, while non-reasoning models use fewer steps for MiP questions than well-defined ones." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.509, + 0.825, + 0.593 + ], + "angle": 0, + "content": "MiP questions), and the worse results are colored red, from which we can easily discover that reasoning models are prone to generate long responses while having low abstain rates across all datasets, indicating the consistent MiP Overthinking issue of existing reasoning models. In addition, by comparing the behaviors of models on different datasets, we can observe that for the relatively harder dataset (MiP-MATH), all models generate relatively longer responses and obtain lower abstain rates, indicating that harder MiP questions require reasoning capabilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.61, + 0.453, + 0.625 + ], + "angle": 0, + "content": "3.3 Thinking Patterns through Tokens" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.636, + 0.825, + 0.734 + ], + "angle": 0, + "content": "To gain deeper insight into the MiP-Overthinking issue, we compare the reasoning-related token distribution on the MiP-GSM8K dataset. As shown in Table 3, we break down the average usages of several token patterns related to the thinking process, as well as the number of steps for each model to solve the given questions. 
Specifically, values of alternatively, wait, check, and but can be directly counted from the model responses, including the thinking paths of reasoning models. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, spited by \\(\\backslash n\\backslash n\\)" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.74, + 0.827, + 0.867 + ], + "angle": 0, + "content": "Reasoning models exhibit much higher occurrence of tokens such as alternatively, wait, and check, compared with non-reasoning models, whose frequencies remain close to zero, indicating their advanced thinking capabilities. However, when moving from well-defined to MiP questions, reasoning models encounter explosive growths on reasoning-related tokens, indicating a large redundancy in thinking patterns. Moreover, when comparing the changes of steps, reasoning models exhibit a large increase in step count for MiP questions, while non-reasoning models typically show fewer steps, suggesting they quickly conclude the question is unanswerable. With this gap, together with the consistently better abstain rates of the non-reasoning models, we conclude that the lengthy reasoning steps are mostly redundant and indicate self-doubt thinking patterns for reasoning models." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.367, + 0.119 + ], + "angle": 0, + "content": "3.4 Step-level Similarities" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.132, + 0.488, + 0.342 + ], + "angle": 0, + "content": "To further assess how redundant the generated content becomes under MiP conditions, we examine the step-level similarity within the model's responses on our MiP-GSM8K dataset. 
Specifically, we divide each response into discrete steps, split by \\(\\backslash n\\backslash n\\) and compute pairwise cosine similarity scores with embeddings generated by \"all-MiniLM-L6-v2\" (Reimers & Gurevych, 2019). The visualization is shown in Figure 3, where each value in the heatmap metric represents the averaged cosine similarities between the corresponding step index. The average similarity score for well-defined question is 0.45 and 0.50 for MiP response. The variance is 7.9e-3 and 8.2e-4 respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.348, + 0.487, + 0.445 + ], + "angle": 0, + "content": "As shown in the figure, responses to MiP questions have greater overall similarity across steps and lower standard variance, indicating the considerable redundancy in the content. This means, in many instances, the model revisits similar partial reasoning or repeats previous sentences with only minor changes, showing a" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.445, + 0.828, + 0.502 + ], + "angle": 0, + "content": "potential self-trapping issue. Together, these patterns confirm that MiP questions induce a high degree of repetitive content in reasoning models. Rather than terminating early to conclude for insufficient premise, the models fill their reasoning paths with repetitive re-checks and reiterations, significantly inflating token usage without improving real abstain rates." + }, + { + "type": "image", + "bbox": [ + 0.5, + 0.136, + 0.822, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.498, + 0.285, + 0.825, + 0.438 + ], + "angle": 0, + "content": "Figure 3: The step-level similarity heatmaps for s1.1 responses towards well-defined (left) and MiP (right) questions in MiP-GSM8K dataset. To avoid differences in matrix size, we only consider responses with more than 50 steps and visualize the average simiality matrix across first 50 steps. 
The heatmap for MiP questions has a higher averaged similarity and lower standard variance, also shown in the heatmap, which indicates the considerable redundancy in its content when responding to MiP questions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.522, + 0.467, + 0.538 + ], + "angle": 0, + "content": "3.5 Thinking Patterns through Example" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.55, + 0.825, + 0.635 + ], + "angle": 0, + "content": "To further understand what happens in the reasoning chain of reasoning models when faced an ill-post input, we present an example of reasoning model's response to a MiP question in Figure 4. We summarize five major thinking patterns we found in the example and highlight them with different colors. We can observe from the example that the model abuses these patterns to generate long responses, while the responses are not only redundant but also not helpful for the model to abstain from the given MiP question. More examples can be found in the appendix D." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.646, + 0.367, + 0.661 + ], + "angle": 0, + "content": "4 Further Discussion" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.68, + 0.487, + 0.696 + ], + "angle": 0, + "content": "4.1 Do Models know premises are missing?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.709, + 0.825, + 0.835 + ], + "angle": 0, + "content": "To investigate whether reasoning models recognize the potential unsolvability of questions during their reasoning process, we conducted a detailed analysis of their reasoning chains. We segmented each reasoning chain into discrete steps using \\( \\backslash n \\backslash n \\) as delimiters and performed step-wise verification to detect whether models express doubt on the question solvability. 
We introduce two key metrics for this analysis: In-Process Suspicion Rate, which measures the percentage of responses where the model expresses doubt about solvability during reasoning, and First Suspicion Index, which captures the average step number at which the model first suspects the missing premise. To ensure robust evaluation, we employed GPT-4o to assess each step three times, using majority voting for our final step-level result. The quantitative results of this analysis are presented in Table 4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.841, + 0.826, + 0.926 + ], + "angle": 0, + "content": "As we can see from the table, most of the existing reasoning models have suspected that the given question might be unsolvable at the very early stage of their reasoning process, demonstrating the ability of reasoning models to recognize the potential MiP. However, these reasoning models lack critical thinking capabilities: they are prone to keep digging the given unsolvable question by re-visiting the question and related definitions again and again and again, rather than question the solvability of the given question. Thus, as visualized in Figure 5, despite existing reasoning" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "text", + "bbox": [ + 0.229, + 0.106, + 0.746, + 0.119 + ], + "angle": 0, + "content": "1 Revisit Question 2 Visit Knowledge 3 Propose Assumption 4 Self Doubt 5 Pause/Check" + }, + { + "type": "title", + "bbox": [ + 0.318, + 0.123, + 0.671, + 0.136 + ], + "angle": 0, + "content": "Model Response for a MiP-Formula Question from s1.1-32B" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.145, + 0.459, + 0.157 + ], + "angle": 0, + "content": "Question: What is the value of \\(((\\gamma)) + \\Xi\\)" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.157, + 0.334, + 0.166 + ], + "angle": 0, + "content": "Model Response:" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.166, + 0.748, + 0.209 + ], + "angle": 0, + "content": "Okay, so I need to figure out the value of \\(((\\gamma)) + \\Xi_{r}\\) Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so \\((\\langle \\gamma \\rangle)\\), and then the Greek letter Xi (\\(\\Xi_{r}\\)). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.209, + 0.748, + 0.281 + ], + "angle": 0, + "content": "Let me start by recalling what I know about gamma. In mathematics, \\(\\gamma\\) often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is \\((\\gamma)\\). The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook. For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like \\((\\gamma)\\). Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure." 
+ }, + { + "type": "text", + "bbox": [ + 0.241, + 0.281, + 0.748, + 0.345 + ], + "angle": 0, + "content": "Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context. Wait, the question is just asking for the value of \\((\\gamma)) + \\Xi\\) without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.345, + 0.748, + 0.418 + ], + "angle": 0, + "content": "Let me consider if this could be a trick question. The double parentheses around gamma \\((\\gamma)\\) maybe that's referring to the number of letters in the word \"gamma\" or something like that. Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So \\((\\gamma)\\) could be 5. Then \\(\\Xi\\) is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe \\(\\Xi\\) represents 14. Then \\(5 + 14\\) would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that In the Greek numeral system, letters are assigned values as follows." + }, + { + "type": "title", + "bbox": [ + 0.429, + 0.418, + 0.563, + 0.428 + ], + "angle": 0, + "content": ".67 more paragraphs here.." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.428, + 0.748, + 0.449 + ], + "angle": 0, + "content": "But just to check another angle. In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here." 
+ }, + { + "type": "text", + "bbox": [ + 0.241, + 0.449, + 0.748, + 0.47 + ], + "angle": 0, + "content": "Alternatively, in chemistry, \\(\\gamma\\) is used to denote surface tension, and \\(\\Xi\\) is not a standard symbol. In engineering, \\(\\gamma\\) is sometimes the shear strain. But again, not relevant." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.469, + 0.748, + 0.49 + ], + "angle": 0, + "content": "Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.49, + 0.748, + 0.511 + ], + "angle": 0, + "content": "Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.511, + 0.748, + 0.553 + ], + "angle": 0, + "content": "Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, \\(\\gamma\\) is 3 and \\(\\Xi\\) is 60, so \\(3 + 60 = 63\\)." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.553, + 0.597, + 0.565 + ], + "angle": 0, + "content": "Alternatively, if it's based on their positions in the alphabet, \\(3 + 14 = 17\\)." + }, + { + "type": "title", + "bbox": [ + 0.434, + 0.564, + 0.564, + 0.574 + ], + "angle": 0, + "content": "21 more paragraphs here." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.574, + 0.748, + 0.595 + ], + "angle": 0, + "content": "Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical." 
+ }, + { + "type": "text", + "bbox": [ + 0.241, + 0.595, + 0.748, + 0.619 + ], + "angle": 0, + "content": "*Final Answer**\\(\\boxed{63} < \\) /think> To determine the value of \\(((\\gamma) + \\Xi)\\), we need to consider the possible meanings of the symbols \\((\\gamma)\\) and \\((\\Xi)\\)." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.619, + 0.748, + 0.66 + ], + "angle": 0, + "content": "1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter \\((\\gamma)\\) (gamma) represents the value 3, and the letter \\((\\Xi)\\) (Xi) represents the value 60.2. **Double Parentheses**: The notation \\(((\\gamma))\\) might be a red herring or simply emphasize that \\((\\gamma)\\) is being used as a numeral. The parentheses do not change the numerical value of \\((\\gamma)\\)." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.66, + 0.679, + 0.671 + ], + "angle": 0, + "content": "Combining these values, we add the numerical values of \\((\\gamma)\\) and \\((\\Xi): - (\\gamma = 3) - (\\Xi = 60)\\)" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.671, + 0.462, + 0.682 + ], + "angle": 0, + "content": "Thus, the value of \\(((\\gamma) + \\Xi)\\) is \\((3 + 60 = 63)\\)." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.682, + 0.266, + 0.697 + ], + "angle": 0, + "content": "63" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.731, + 0.828, + 0.844 + ], + "angle": 0, + "content": "Figure 4: An example of reasoning model (s1.1-32B) response to a MiP question. 
The response exhibits five distinct thinking patterns, highlighted in different colors: \\(①\\) Revisit Question (yellow), where the model reexamines the original query; \\(②\\) Visit Knowledge (red), where the model accesses domain-specific knowledge; \\(③\\) Propose Assumption (blue), where the model proposes and investigates various hypotheses; \\(④\\) Self Doubt (green), where the model questions its own reasoning and expresses uncertainty; and \\(⑤\\) Pause/Check (purple), where the model pauses to review previous steps. These patterns demonstrate the model's complex but potentially inefficient reasoning process when confronted with missing premises." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "models suspecting the solvability of most of the given MiP questions, they only abstain a very small proportion of them." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.102, + 0.825, + 0.15 + ], + "angle": 0, + "content": "
ModelMiP-FormulaMiP-GSMR
DeepSeek-R1DS-QwenQwQS1.1DeepSeek-R1DS-QwenQwQS1.1
In-Process Suspicion Rate100%100%100%100%95.5%83.3%99.6%100%
In-Process First Suspicion Index1.321.361.421.162.013.901.771.61
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.159, + 0.827, + 0.258 + ], + "angle": 0, + "content": "Table 4: The in-process insufficiency suspicion information across different reasoning models on MiP-Formula and MiP-GSMR datasets. The in-process insufficiency suspicion is defined as when the reasoning model suspects the given question is unsolvable during its thinking process. In-Process Suspicion Rate represents how many percent of the samples trigger the in-process suspicion. First Suspicion Index is the averaged step index where the model first suspects the question's validity. Most reasoning models do notice the existence of MiP at the very early steps, but they still suffer from low abstain rate and cannot confidently stop the thinking." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.278, + 0.825, + 0.322 + ], + "angle": 0, + "content": "Based on the above observations, we conclude that reasoning models actually have the capabilities to find out that the given MiP question is not solvable, but they do not \"dare\" to abstain it. Thus, our MiP-Overthinking issue indicates the lack of critical thinking abilities of reasoning models." + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.336, + 0.428, + 0.444 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.439, + 0.348, + 0.553, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.561, + 0.348, + 0.672, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.683, + 0.349, + 0.794, + 0.442 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.447, + 0.825, + 0.532 + ], + "angle": 0, + "content": "Figure 5: The transition flow between in-process suspicion of MiP and the final successful abstention on different reasoning models. 
For each Sankey diagram, the left bars represent whether the model suspects the given question is unsolvable during its thinking process, i.e., Suspected or Unsuspected; the right bars represent the final abstention, categorized into Abstain (preferred) or Non-abstain. Most existing reasoning models have suspected that the given question might be unsolvable, but only for a very small portion, the models insist on their suspicion." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.537, + 0.45, + 0.552 + ], + "angle": 0, + "content": "4.2 What Caused MiP-Overthinking?" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.569, + 0.827, + 0.625 + ], + "angle": 0, + "content": "Figure 2 demonstrates that MiP-Overthinking manifests across both RL-based and SFT-based reasoning models. We hypothesize this phenomenon primarily originates from inadequate length constraints during the rule-based reinforcement learning phase of RL-based models, subsequently propagating to SFT-based models through distillation." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.626, + 0.519, + 0.751 + ], + "angle": 0, + "content": "Current RL-based reasoning models predominantly employ rule-based training focused on format and accuracy rewards (Shao et al., 2024; Sui et al., 2025), with some incorporating step or length rewards to promote thorough reasoning (Face, 2025). This approach can lead to reward hacking, where models explore excessive reasoning patterns to achieve correct answers (Aggarwal & Welleck, 2025; Shen et al., 2025; Luo et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.758, + 0.52, + 0.923 + ], + "angle": 0, + "content": "To demonstrate the transmissibility of this behavior through distillation (Xu et al., 2024), we fine-tune Qwen-2.5-7B-Instruct using small-scale \\(50\\mathrm{MiP}\\) responses generated by DeepSeek-R1 on the MiP-Formula dataset. 
As shown in Figure 6, the fine-tuned model exhibits clear MiP-Overthinking characteristics when evaluated on GSM8K: significantly increased response lengths for both MiP and well-defined questions, emergence of a length disparity between MiP and well-defined responses previously absent in the original model, and decreased abstain rates." + }, + { + "type": "image", + "bbox": [ + 0.534, + 0.634, + 0.852, + 0.794 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.529, + 0.809, + 0.825, + 0.922 + ], + "angle": 0, + "content": "Figure 6: Comparison of response length, abstain rate of MiP, and accuracy of well-defined questions before and after tuning on 50 responses from DeepSeek-R1 on the MiP-Formula dataset. The results demonstrate rapid onset of MiP-Overthinking behavior after exposure to a small number of MiP examples during fine-tuning." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.034, + 0.331, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.102, + 0.326, + 0.119 + ], + "angle": 0, + "content": "5 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.139, + 0.455, + 0.155 + ], + "angle": 0, + "content": "5.1 Reasoning Large Language Model" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.169, + 0.828, + 0.268 + ], + "angle": 0, + "content": "Recent advances in Large Language Models (LLMs) have sparked significant research interest in enhancing their reasoning capabilities (Ahn et al., 2024; Besta et al., 2025; Chen et al., 2025a). Research has focused on improving these capabilities through various post-training approaches. Several studies have employed reinforcement learning techniques to guide models toward more effective reasoning strategies (Shao et al., 2024; Xiong et al., 2025; Cui et al., 2025). 
Additionally, researchers have demonstrated that instruction tuning on carefully curated, high-quality datasets can significantly enhance reasoning performance (Ye et al., 2025; Muennighoff et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.828, + 0.388 + ], + "angle": 0, + "content": "While Reasoning Models have demonstrated impressive performance on various benchmarks, recent studies have begun to critically examine the quality and efficiency of their reasoning processes. Xia et al. (2025) conducted a comprehensive analysis of RLMs' reasoning quality, revealing significant redundancy in their solution approaches. Further investigations (Chen et al., 2025b; Cuadron et al., 2025; Qu et al., 2025; Liu et al., 2025) identified a concerning \"overthinking\" phenomenon, where reasoning model generate unnecessarily verbose solutions even for simple problems. Building on these observations, Kumar et al. (2025) demonstrated the potential security implications of this behavior by developing a slowdown attack that exploits overthinking through input perturbation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.41, + 0.334, + 0.426 + ], + "angle": 0, + "content": "5.2 Test-time Scaling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.44, + 0.829, + 0.595 + ], + "angle": 0, + "content": "In contrast to earlier research on training-time scaling laws (Kaplan et al., 2020), recent literature has increasingly focused on test-time performance scaling strategies, which aim to enhance model performance by optimizing inference-time token generation (Snell et al., 2024; OpenAI, 2024a). 
These approaches can be categorized into several primary methodologies: parallel sampling techniques (Brown et al., 2024; Levi, 2024), which generate multiple candidate responses and select the optimal output; sequential refinement approaches (Snell et al., 2024; Lee et al., 2025), which enable iterative improvement of previous outputs; and tree-based methods (Gandhi et al., 2024; Hou et al., 2025), which combine elements of both parallel and sequential approaches. While the prevailing consensus suggests that increased token generation during inference enhances reasoning capabilities, our investigation reveals a concerning counterpoint: under certain conditions, extended responses can lead to computational inefficiency and, paradoxically, degraded performance outcomes." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.618, + 0.559, + 0.635 + ], + "angle": 0, + "content": "5.3 Models' Behavior Study in Ambiguous Condition" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.648, + 0.828, + 0.734 + ], + "angle": 0, + "content": "LLMs are prone to hallucination (Huang et al., 2025; Xu et al., 2025), generating non-existent conditions that compromise trustworthiness. An essential aspect of reliability is the ability to abstain under uncertainty. Prior work (Cole et al., 2023; Amayuelas et al., 2024; Zhou et al., 2023) has proposed benchmarks assessing LLMs' recognition of knowledge limits when facing ambiguous or challenging queries. Different from theirs, our study explores reasoning models under MiP condition. Surprisingly, we find these specialized models exhibit prolonged reasoning and inferior performance." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.755, + 0.303, + 0.771 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.785, + 0.828, + 0.926 + ], + "angle": 0, + "content": "We introduce the Overthinking under Missing Premise (MiP-Overthinking) issue, which is a widespread but still under-explored phenomenon for current reasoning models. In this phenomenon, when faced with ill-defined unsolvable questions with missing premises, existing models generate dramatically long responses while having very low abstain rates. With systematic investigation of this phenomenon, our findings show that while these models sometimes suspect the given MiP question is not solvable in the early state of the thinking process, they typically fail to act on those suspicions and instead generating repetitive and redundant thinking traces with the final answer that does not address the missing premises, indicating a lack of critical thinking capability. This behavior highlights a pressing gap: current training recipes for reasoning models, which emphasize thorough chains of thought, do not sufficiently reward critical thinking or early exit from unsolvable tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.961 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.048 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.103, + 0.27, + 0.118 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.127, + 0.827, + 0.183 + ], + "angle": 0, + "content": "Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, and etc. Phi-3 technical report: A highly capable language model locally on your phone, 2024. URL https://arxiv.org/abs/2404.14219." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.194, + 0.825, + 0.223 + ], + "angle": 0, + "content": "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.234, + 0.829, + 0.318 + ], + "angle": 0, + "content": "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.329, + 0.825, + 0.372 + ], + "angle": 0, + "content": "Alfonso Amayuelas, Kyle Wong, Liangming Pan, Wenhu Chen, and William Wang. Knowledge of knowledge: Exploring known-unknowns uncertainty with large language models, 2024. URL https://arxiv.org/abs/2305.13712." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.382, + 0.827, + 0.453 + ], + "angle": 0, + "content": "Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, Tomasz Sternal, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, Łukasz Flis, Hannes Eberhard, Hubert Niewiadomski, and Torsten Hoefler. Reasoning language models: A blueprint, 2025. URL https://arxiv.org/abs/2501.11223." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.464, + 0.827, + 0.507 + ], + "angle": 0, + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.517, + 0.829, + 0.573 + ], + "angle": 0, + "content": "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wanxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models, 2025a. URL https://arxiv.org/abs/2503.09567." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.584, + 0.829, + 0.641 + ], + "angle": 0, + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, Rui Wang, Zhaopeng Tu, Haitao Mi, and Dong Yu. Do not think that much for \\(2 + 3 = ?\\) on the overthinking of o1-like llms, 2025b. URL https://arxiv.org/abs/2412.21187." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.652, + 0.829, + 0.707 + ], + "angle": 0, + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.719, + 0.829, + 0.762 + ], + "angle": 0, + "content": "Jeremy R. Cole, Michael J. Q. Zhang, Daniel Gillick, Julian Martin Eisenschlos, Bhuwan Dhingra, and Jacob Eisenstein. Selectively answering ambiguous questions, 2023. URL https://arxiv.org/abs/2305.14613." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.772, + 0.829, + 0.843 + ], + "angle": 0, + "content": "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks, 2025. URL https://arxiv.org/abs/2502.08235." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.854, + 0.829, + 0.924 + ], + "angle": 0, + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.127, + 0.829, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.33, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.161 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, and etc. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.171, + 0.827, + 0.2 + ], + "angle": 0, + "content": "Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.209, + 0.827, + 0.252 + ], + "angle": 0, + "content": "Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D. Goodman. Stream of search (sos): Learning to search in language, 2024. URL https://arxiv.org/abs/2404.03683." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.262, + 0.827, + 0.304 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. NeurIPS, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.315, + 0.827, + 0.358 + ], + "angle": 0, + "content": "Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling, 2025. URL https://arxiv.org/abs/2501.11651." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.368, + 0.827, + 0.397 + ], + "angle": 0, + "content": "Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey, 2023. URL https://arxiv.org/abs/2212.10403." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.406, + 0.827, + 0.478 + ], + "angle": 0, + "content": "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, and Ting Liu. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems, 43(2):1-55, January 2025. ISSN 1558-2868. doi: 10.1145/3703155. URL http://dx.doi.org/10.1145/3703155." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.487, + 0.827, + 0.53 + ], + "angle": 0, + "content": "Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models, 2020. URL https://arxiv.org/abs/2001.08361." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.54, + 0.827, + 0.582 + ], + "angle": 0, + "content": "Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. 
Overthink: Slowdown attacks on reasoning llms, 2025. URL https://arxiv.org/abs/2502.02542." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.593, + 0.827, + 0.635 + ], + "angle": 0, + "content": "Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking, 2025. URL https://arxiv.org/abs/2501.09891." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.645, + 0.825, + 0.673 + ], + "angle": 0, + "content": "Noam Levi. A simple model of inference scaling laws, 2024. URL https://arxiv.org/abs/2410.16377." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.684, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.723, + 0.827, + 0.766 + ], + "angle": 0, + "content": "Changshu Liu, Shizhuo Dylan Zhang, Ali Reza Ibrahimzada, and Reyhaneh Jabbarvand. Codemind: A framework to challenge large language models for code reasoning, 2024. URL https:// arxiv.org/abs/2402.09664." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.776, + 0.827, + 0.819 + ], + "angle": 0, + "content": "Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey, 2025. URL https://arxiv.org/abs/2503.23077." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.829, + 0.827, + 0.872 + ], + "angle": 0, + "content": "Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning, 2025. URL https://arxiv.org/abs/2501.12570." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.882, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.331, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.134 + ], + "angle": 0, + "content": "OpenAI. Learning to reason with llms, 2024a. URL https://openai.com/index/learning-to-reason-with-llms/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.826, + 0.172 + ], + "angle": 0, + "content": "OpenAI. OpenAI o1 System Card, December 2024b. URL https://cdn.openai.com/ol-system-card-20241205.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.178, + 0.826, + 0.21 + ], + "angle": 0, + "content": "OpenAI. OpenAI o1-mini System Card, September 2024c. URL https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.216, + 0.826, + 0.247 + ], + "angle": 0, + "content": "OpenAI. OpenAI o3-mini System Card, January 2025. URL https://cdn.openai.com/o3-mini-system-card-feb10.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.254, + 0.829, + 0.297 + ], + "angle": 0, + "content": "OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, and etc. Gpt-4 technical report, 2024. URL https://arxiv.org/abs/2303.08774." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.306, + 0.829, + 0.378 + ], + "angle": 0, + "content": "Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. Are NLP models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2080-2094, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.168. URL https://aclanthology.org/2021.naacl-main.168." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.385, + 0.829, + 0.456 + ], + "angle": 0, + "content": "Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, Peng Li, Wei Wei, Jing Shao, Chaochao Lu, Yue Zhang, Xian-Sheng Hua, Bowen Zhou, and Yu Cheng. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond, 2025. URL https://arxiv.org/abs/2503.21614." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.465, + 0.829, + 0.55 + ], + "angle": 0, + "content": "Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In Kentaro Inui, Jing Jiang, Vincent Ng, and Xiaojun Wan (eds.), Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3982-3992, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1410. URL https://aclanthology.org/D19-1410/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.558, + 0.829, + 0.614 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.623, + 0.829, + 0.667 + ], + "angle": 0, + "content": "Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models, 2025. URL https://arxiv.org/abs/2503.04472." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.675, + 0.829, + 0.717 + ], + "angle": 0, + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.726, + 0.829, + 0.771 + ], + "angle": 0, + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.778, + 0.829, + 0.836 + ], + "angle": 0, + "content": "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, Soroosh Mariooryad, Yifan Ding, Xinyang Geng, and etc. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024a. URL https://arxiv.org/abs/2403.05530." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.829, + 0.887 + ], + "angle": 0, + "content": "Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, and etc. Gemma 2: Improving open language models at a practical size, 2024b. URL https://arxiv.org/abs/2408.00118." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Qwen Team. Qwen2.5: A party of foundation models, September 2024. 
URL https://qwenlm.github.io/blog/qwen2.5/." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.961 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.331, + 0.049 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.826, + 0.135 + ], + "angle": 0, + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.141, + 0.827, + 0.185 + ], + "angle": 0, + "content": "Yaojing Wang, Shengqiong Wu, Yuecheng Zhang, Shuicheng Yan, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey, 2025. URL https://arxiv.org/abs/2503.12605." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.193, + 0.825, + 0.224 + ], + "angle": 0, + "content": "Yuqing Wang and Yun Zhao. Gemini in reasoning: Unveiling commonsense in multimodal large language models, 2023. URL https://arxiv.org/abs/2312.17661." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.23, + 0.825, + 0.261 + ], + "angle": 0, + "content": "Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy, 2025. URL https://arxiv.org/abs/2404.05692." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.268, + 0.827, + 0.311 + ], + "angle": 0, + "content": "Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.319, + 0.825, + 0.364 + ], + "angle": 0, + "content": "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.371, + 0.825, + 0.403 + ], + "angle": 0, + "content": "Ziwei Xu, Sanjay Jain, and Mohan Kankanhalli. Hallucination is inevitable: An innate limitation of large language models, 2025. URL https://arxiv.org/abs/2401.11817." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.409, + 0.825, + 0.44 + ], + "angle": 0, + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.447, + 0.827, + 0.49 + ], + "angle": 0, + "content": "Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. Navigating the grey area: How expressions of uncertainty and overconfidence affect language models, 2023. URL https://arxiv.org/abs/2302.13439." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.33, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.102, + 0.44, + 0.121 + ], + "angle": 0, + "content": "Table of Contents for Appendix" + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.138, + 0.826, + 0.153 + ], + "angle": 0, + "content": "A Detailed Experimental Setup 17" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.16, + 0.825, + 0.174 + ], + "angle": 0, + "content": "A.1 Models 17" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.181, + 0.825, + 0.195 + ], + "angle": 0, + "content": "A.2 Evaluation Metrics 17" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.202, + 0.825, + 0.217 + ], + "angle": 0, + "content": "A.3 Generation Setting 17" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.16, + 0.825, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.236, + 0.825, + 0.25 + ], + "angle": 0, + "content": "B Data Construction Details 17" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.27, + 0.825, + 0.285 + ], + "angle": 0, + "content": "C Prompt Template for Evaluation 18" + }, + { + "type": "text", + "bbox": [ + 0.174, + 0.304, + 0.825, + 0.319 + ], + "angle": 0, + "content": "D Examples of Model Response 19" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.236, + 0.825, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.103, + 0.452, + 0.12 + ], + "angle": 0, + "content": "A Detailed Experimental Setup" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.135, + 0.269, + 0.149 + ], + "angle": 0, + "content": "A.1 Models" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.161, + 0.828, + 0.259 + ], + "angle": 0, + "content": "We leverage a series of non-reasoning and reasoning model for our study, from both open-source and proprietary source with different training recipes. The non-reasoning models we use include Qwen2.5-32B-Instruct Team (2024), Gemma-2-27B-it Team et al. (2024b), Phi-3-medium-128k Abdin et al. (2024), GPT-4o OpenAI et al. (2024) and Gemini1.5 Team et al. (2024a). The reasoning models we use are QwQ-32B Team (2025), DeepSeek-R1-Distill-Qwen-32B DeepSeek-AI et al. (2025), S1.1 Muennighoff et al. (2025), DeepSeek-R1 DeepSeek-AI et al. (2025), GPT-o1 OpenAI (2024b), GPT-o1mini OpenAI (2024c) and GPT-o3mini OpenAI (2025)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.277, + 0.352, + 0.291 + ], + "angle": 0, + "content": "A.2 Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.303, + 0.827, + 0.415 + ], + "angle": 0, + "content": "In Section 3.2, we measure response length by considering both reasoning and answer components. For open-source models, we employ model-specific tokenizers to calculate token counts, while for proprietary models, we obtain generation lengths via their APIs. To determine abstain rates, we parse responses by paragraphs (delimited by \\(\\backslash \\mathrm{n} \\backslash \\mathrm{n}^{\\prime}\\)) and analyze the final two paragraphs as the model's conclusion. These conclusions, along with reference answers when available, are evaluated by GPT-4o to assess whether the model provides a definitive answer or abstains. For data sets with reference answers (GSM8K and MATH), GPT-4o also evaluates the correctness of the response. 
The prompt we use for evaluation can be found in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.433, + 0.351, + 0.449 + ], + "angle": 0, + "content": "A.3 Generation Setting" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.825, + 0.53 + ], + "angle": 0, + "content": "For all open-source models, we employ greedy decoding and utilize the default chat template specific to each model. We deliberately omit system prompts prior to posing questions to maintain consistency across evaluations. For proprietary models, we adhere to their default parameter configurations as provided by their respective APIs. In the case of GPT-o1mini and GPT-o3mini, we configure the 'reasoning_effort' parameter to the medium setting by default." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.551, + 0.429, + 0.567 + ], + "angle": 0, + "content": "B Data Construction Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.583, + 0.827, + 0.639 + ], + "angle": 0, + "content": "To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different qualities, including SVAMP, GSM8K, and MATH 500. In addition, we also construct a synthetic dataset, rule-based Formula, for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.827, + 0.772 + ], + "angle": 0, + "content": "MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through a combination of variables and operators, with a maximum recursion depth of three. The variable set comprises numerical values, Latin letters, and Greek symbols. The operator set includes arithmetic operators \\((+, -)\\), set operators \\((\\cup, \\supset)\\), mathematical functions \\((\\sin, \\sqrt{\\pi})\\), and construct operators \\((\\sum, \\nabla)\\). 
To ensure the formulas are fundamentally unsolvable, we enforce the inclusion of at least one unassigned variable in each formula, excluding commonly recognized mathematical or physical constants such as \\(e^{\\prime}\\), \\(\\pi^{\\prime}\\), and \\(g^{\\prime}\\). While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.825, + 0.889 + ], + "angle": 0, + "content": "MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset comprising 1,000 elementary-school-level mathematical word problems, where each instance consists of a problem body and an associated question. The MiP questions can be generated by randomly permuting the problem bodies and associated questions. To maintain dataset integrity, we manually select 300 permuted questions after a thorough human evaluation to eliminate any inadvertently solvable questions that may exist. The resulting problems contain clear logical inconsistencies between their body and question components, making their unsolvability readily apparent without additional context." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.926 + ], + "angle": 0, + "content": "MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a grade school mathematics dataset that presents more complex challenges compared to SVAMP. The questions in GSM8K typically" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.203 + ], + "angle": 0, + "content": "contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. 
The MiP question can be constructed by randomly removing a necessary premise from the original solvable question. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question. Subsequently, a thorough human verification is conducted to filter out those questions that are still solvable in some way and finally obtain 582 MiP questions. Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.209, + 0.828, + 0.309 + ], + "angle": 0, + "content": "MiP-MATH. For the MATH dataset (Hendrycks et al., 2021), which comprises challenging competition-level mathematical questions, it is hard to build a rule-based filtering mechanism before human evaluation. Thus, we directly read through all the questions in MATH500 and manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.329, + 0.483, + 0.347 + ], + "angle": 0, + "content": "C Prompt Template for Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.362, + 0.825, + 0.391 + ], + "angle": 0, + "content": "As we need LLM-as-a-judge to evaluate the open-end generations of the models in various experiments in this study, in this section we showcase the prompt template we use for each kind of evaluation." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.397, + 0.827, + 0.455 + ], + "angle": 0, + "content": "For the evaluation of the models' answer accuracy and abstain rate, we adopt the following prompt templates designed for 'paired' and 'non-paired' data, respectively. As we observe that some models, for example Gemma-2-27B-IT, often output an additional \\(\\backslash n\\backslash n\\) at the end of response, we take the last two paragraph segmented by \\(\\backslash n\\backslash n\\) to avoid pasing in an empty string." + }, + { + "type": "title", + "bbox": [ + 0.263, + 0.472, + 0.735, + 0.487 + ], + "angle": 0, + "content": "Prompt Template for Response Evaluation of Formula and SVAMP" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.497, + 0.805, + 0.561 + ], + "angle": 0, + "content": "You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the answer explicitly says that the condition of the question is insufficient, you should return 0. If the model provides an answer that is a number or formula with variables, you should return 1. Please only return the number, no other text." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.561, + 0.42, + 0.575 + ], + "angle": 0, + "content": "Model answer: [model_answer_short]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.825, + 0.651 + ], + "angle": 0, + "content": "Figure 7: The prompt we use to evaluate the accuracy and abstain rate of the model on Formula and SVAMP. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset." 
+ }, + { + "type": "title", + "bbox": [ + 0.267, + 0.66, + 0.731, + 0.675 + ], + "angle": 0, + "content": "Prompt Template for Response Evaluation of GSM8K and MATH" + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.686, + 0.804, + 0.75 + ], + "angle": 0, + "content": "You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the model output says that the condition of the question is insufficient, you should return 0. Otherwise, if the model gives a clear answer and matches the reference answer, you should return 1. If the model's answer does not match the reference answer, you should return 2. Please only return the number, no other text." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.75, + 0.421, + 0.763 + ], + "angle": 0, + "content": "Model answer: [model_answer_short]" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.763, + 0.424, + 0.776 + ], + "angle": 0, + "content": "Reference answer: [reference_answer]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.809, + 0.825, + 0.852 + ], + "angle": 0, + "content": "Figure 8: The prompt we use to evaluate the accuracy and abstain rate of the model on GSM8K and MATH. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.913 + ], + "angle": 0, + "content": "We use the prompt template in Figure 9 to find the first paragraph that the model suspected a missing premise. We pass in the response sequentially by paragraph until the GPT-4o give a positive response. In practice we find it is not very stable, so we repeat this process for 3 times and use the median value." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.949, + 0.508, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.268, + 0.105, + 0.731, + 0.12 + ], + "angle": 0, + "content": "Prompt Template for Response Evaluation of GSM8K and MATH" + }, + { + "type": "text", + "bbox": [ + 0.193, + 0.13, + 0.805, + 0.156 + ], + "angle": 0, + "content": "You are a helpful assistant. You will be given a paragraph which is part of the answer to a question. You need to identify if the paragraph doubt that the answer depends on some other unspecified condition." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.156, + 0.336, + 0.169 + ], + "angle": 0, + "content": "Paragraph: [paragraph]" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.17, + 0.382, + 0.182 + ], + "angle": 0, + "content": "Answer in one word, yes or no." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.215, + 0.825, + 0.246 + ], + "angle": 0, + "content": "Figure 9: The prompt we use to judge if the model suspect there is a missing premise in the response paragraph. [paragraph] is the part of the model response spited by \\(\\backslash n\\backslash n\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.28, + 0.456, + 0.298 + ], + "angle": 0, + "content": "D Examples of Model Response" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.311, + 0.827, + 0.424 + ], + "angle": 0, + "content": "In this section, we present some examples of the model response of both non-reasoning and reasoning model on MiP data. As we can see from Figure 10 and Figure 11, the non-reasoning models soon identify the missing premise issue of the question. They either abstain from answering the question, as in Figure 10, or friendly invite the user to provide more information. 
However, as we can see from Figure 11 and Figure 13, reasoning models generate extremely verbose answers on these two apparently premise missing problems. What is worse, they fail to abstain to answer the question. The response in Figure 11 arrives at an absurd answer, and the model in Figure 13 generates a hallucinated answer based on its assumption rather than provided information." + }, + { + "type": "title", + "bbox": [ + 0.278, + 0.439, + 0.721, + 0.455 + ], + "angle": 0, + "content": "Model Response for a MiP-Formula Question from Gemini-1.5" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.465, + 0.456, + 0.479 + ], + "angle": 0, + "content": "Question: What is the value of \\(((\\gamma)) + \\Xi\\)" + }, + { + "type": "title", + "bbox": [ + 0.195, + 0.479, + 0.306, + 0.491 + ], + "angle": 0, + "content": "Model Response:" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.491, + 0.805, + 0.516 + ], + "angle": 0, + "content": "The expression \\(((\\gamma)) + \\Xi\\) involves two symbols that need to be defined to determine the value of the expression." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.516, + 0.805, + 0.553 + ], + "angle": 0, + "content": "\\(\\gamma\\) is the Euler-Mascheroni constant, approximately equal to 0.5772156649. The double parentheses notation \\((\\gamma)\\) is not a standard mathematical notation. Without further definition of this notation, we cannot evaluate this term." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.554, + 0.805, + 0.605 + ], + "angle": 0, + "content": "\\(\\Xi\\) is the Riemann Xi function. It is related to the Riemann zeta function, but the notation \\(\\Xi\\) itself isn't a specific numerical value. The Riemann Xi function is defined as a function of a complex variable, not a constant. So \\(\\Xi\\) by itself doesn't have a specific numerical value unless a specific value is substituted for its argument." 
+ }, + { + "type": "text", + "bbox": [ + 0.194, + 0.605, + 0.805, + 0.631 + ], + "angle": 0, + "content": "Therefore, we cannot determine the value of \\(((\\gamma)) + \\Xi\\) without knowing what \\(((\\gamma))\\) and the argument of \\(\\Xi\\) represent. The expression is incomplete without definitions for these symbols." + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.631, + 0.478, + 0.651 + ], + "angle": 0, + "content": "Final Answer: The final answer is undefined" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.685, + 0.825, + 0.714 + ], + "angle": 0, + "content": "Figure 10: An example of model response from Gemini_1.5 on MiP-Formula dataset. The model quickly identify the missing premise and abstain to answer." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." + }, + { + "type": "title", + "bbox": [ + 0.294, + 0.396, + 0.706, + 0.412 + ], + "angle": 0, + "content": "Model Response for a MiP-GSM8k Question from GPT-40" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.422, + 0.804, + 0.437 + ], + "angle": 0, + "content": "Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" + }, + { + "type": "title", + "bbox": [ + 0.195, + 0.437, + 0.306, + 0.448 + ], + "angle": 0, + "content": "Model Response:" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.448, + 0.803, + 0.486 + ], + "angle": 0, + "content": "To determine the original price of the book, we generally need to know the discount or markdown percentage applied to calculate its selling price of \\(19.50. 
If Kyle bought the book for \\)19.50 and we know it was discounted, we can use the following formula to find out the original price:" + }, + { + "type": "equation", + "bbox": [ + 0.3, + 0.498, + 0.697, + 0.512 + ], + "angle": 0, + "content": "\\[\nS e l l i n g P r i c e = \\text {O r i g i n a l P r i c e} - (\\text {O r i g i n a l P r i c e} \\times \\text {D i s c o u n t R a t e})\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.528, + 0.803, + 0.554 + ], + "angle": 0, + "content": "Without additional information about discounts, promotions, or markdown rates applied, it's not possible to precisely determine the original price solely from the amount Kyle paid." + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.554, + 0.742, + 0.567 + ], + "angle": 0, + "content": "If you provide the discount percentage or rate, I can help calculate the original price for you!" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.601, + 0.825, + 0.629 + ], + "angle": 0, + "content": "Figure 11: An example of model response from GPT-4o on MiP-GSM8k dataset. The model quickly identify the missing premise and ask the user for more information." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "title", + "bbox": [ + 0.288, + 0.136, + 0.71, + 0.151 + ], + "angle": 0, + "content": "Model Response for a MiP-Formula Question from s1.1-32B" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.162, + 0.455, + 0.177 + ], + "angle": 0, + "content": "Question: What is the value of \\(((\\gamma)) + \\Xi\\)" + }, + { + "type": "title", + "bbox": [ + 0.195, + 0.177, + 0.306, + 0.188 + ], + "angle": 0, + "content": "Model Response:" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.188, + 0.802, + 0.239 + ], + "angle": 0, + "content": "Okay, so I need to figure out the value of \\(((\\gamma)) + \\Xi\\). Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so \\(((\\gamma))\\), and then the Greek letter Xi (Ξ). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.239, + 0.803, + 0.326 + ], + "angle": 0, + "content": "Let me start by recalling what I know about gamma. In mathematics, \\(\\gamma\\) often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is \\(((\\gamma))\\). The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook? For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like \\((\\gamma)\\). Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.327, + 0.803, + 0.401 + ], + "angle": 0, + "content": "Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. 
In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context? Wait, the question is just asking for the value of \\(((\\gamma)) + \\Xi\\) without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.402, + 0.803, + 0.477 + ], + "angle": 0, + "content": "Let me consider if this could be a trick question. The double parentheses around gamma((\\(\\gamma\\)) ) maybe that's referring to the number of letters in the word \"gamma\" or something like that? Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So ((\\(\\gamma\\))) could be 5. Then \\(\\Xi\\) is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe \\(\\Xi\\) represents 14? Then \\(5 + 14\\) would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that." + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.478, + 0.595, + 0.49 + ], + "angle": 0, + "content": "In the Greek numeral system, letters are assigned values as follows:" + }, + { + "type": "title", + "bbox": [ + 0.419, + 0.492, + 0.578, + 0.503 + ], + "angle": 0, + "content": ".67 more paragraphs here." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.503, + 0.802, + 0.528 + ], + "angle": 0, + "content": "But just to check another angle: In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here." 
+ }, + { + "type": "text", + "bbox": [ + 0.194, + 0.528, + 0.802, + 0.553 + ], + "angle": 0, + "content": "Alternatively, in chemistry, \\(\\gamma\\) is used to denote surface tension, and \\(\\Xi\\) is not a standard symbol. In engineering, \\(\\gamma\\) is sometimes the shear strain. But again, not relevant." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.553, + 0.802, + 0.577 + ], + "angle": 0, + "content": "Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.578, + 0.802, + 0.603 + ], + "angle": 0, + "content": "Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.604, + 0.803, + 0.653 + ], + "angle": 0, + "content": "Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, \\(\\gamma\\) is 3 and \\(\\Xi\\) is 60, so \\(3 + 60 = 63\\)." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.654, + 0.622, + 0.667 + ], + "angle": 0, + "content": "Alternatively, if it's based on their positions in the alphabet, \\( 3 + 14 = 17 \\)." + }, + { + "type": "title", + "bbox": [ + 0.419, + 0.668, + 0.578, + 0.679 + ], + "angle": 0, + "content": ".21 more paragraphs here" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.679, + 0.803, + 0.705 + ], + "angle": 0, + "content": "Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical." 
+ }, + { + "type": "text", + "bbox": [ + 0.194, + 0.706, + 0.802, + 0.733 + ], + "angle": 0, + "content": "*Final Answer**\\(\\boxed{63}\\) < /think> To determine the value of \\(((\\gamma) + \\Xi)\\), we need to consider the possible meanings of the symbols \\((\\gamma)\\) and \\((\\Xi)\\)." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.733, + 0.802, + 0.783 + ], + "angle": 0, + "content": "1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter \\((\\gamma)\\) (gamma) represents the value 3, and the letter \\((\\Xi)\\) (Xi) represents the value 60. 2. **Double Parentheses**: The notation \\(((\\gamma))\\) might be a red herring or simply emphasize that \\((\\gamma)\\) is being used as a numeral. The parentheses do not change the numerical value of \\((\\gamma)\\)." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.783, + 0.721, + 0.796 + ], + "angle": 0, + "content": "Combining these values, we add the numerical values of \\((\\gamma)\\) and \\((\\Xi)\\): \\(-(\\gamma = 3) - (\\Xi = 60)\\)" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.796, + 0.46, + 0.808 + ], + "angle": 0, + "content": "Thus, the value of \\(((\\gamma) + \\Xi)\\) is \\((3 + 60 = 63)\\)." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.809, + 0.223, + 0.826 + ], + "angle": 0, + "content": "63" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.861, + 0.825, + 0.89 + ], + "angle": 0, + "content": "Figure 12: An example of response from s1.1 model on MiP-Formula data. The model spend lots of time doing inefficient and redundant reasoning before outputting a meaningless result." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.507, + 0.96 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.034, + 0.331, + 0.047 + ], + "angle": 0, + "content": "Preprint. Under review." 
+ }, + { + "type": "title", + "bbox": [ + 0.274, + 0.105, + 0.723, + 0.12 + ], + "angle": 0, + "content": "Model Response for a MiP-GSM8k Question from DeepSeek-R1" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.131, + 0.804, + 0.145 + ], + "angle": 0, + "content": "Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" + }, + { + "type": "title", + "bbox": [ + 0.195, + 0.145, + 0.307, + 0.157 + ], + "angle": 0, + "content": "Model Response:" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.157, + 0.805, + 0.207 + ], + "angle": 0, + "content": "Okay, let's see. Kyle bought a book for $19.50, and the question is asking for the original price. Hmm, wait a second. The problem mentions that it's last year's best-selling book, so maybe it was on sale or discounted? The question doesn't explicitly state that there was a discount, but it does say \"last year's,\" which might imply that the book is now being sold at a reduced price." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.207, + 0.804, + 0.257 + ], + "angle": 0, + "content": "But hold on, the problem doesn't give any specific information about a discount percentage or a markdown. Without that, how can we calculate the original price? Maybe there's some context or common knowledge I'm missing here. For example, sometimes best-selling books are sold at a standard discount, but that's not a universal rule." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.257, + 0.804, + 0.308 + ], + "angle": 0, + "content": " Alternatively, maybe the question is testing understanding of something else, like sales tax or profit margins. But if it were sales tax, the original price would be less than $19.50, and the total after tax would be more. But the question is asking for the original price, implying that $19.50 is the price after some change." 
+ }, + { + "type": "text", + "bbox": [ + 0.194, + 0.308, + 0.804, + 0.358 + ], + "angle": 0, + "content": "Wait, perhaps the problem is incomplete? The user just says \"Kyle bought last year's best-selling book for $19.50. What was the original price of the book?\" If there's no additional information given, like a percentage discount or a mention that the price was reduced, then it's impossible to determine the original price." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.358, + 0.804, + 0.396 + ], + "angle": 0, + "content": "But maybe I need to make an assumption here. Since it's last year's best-selling book, it's common for such books to be discounted when a new edition comes out or after some time. Let me think, maybe the standard discount is \\(25\\%\\) or something? But the problem doesn't specify." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.396, + 0.804, + 0.434 + ], + "angle": 0, + "content": "Wait, perhaps the problem was part of a larger set where the discount rate was mentioned earlier, and this is a follow-up question. But as it stands, with the information given here, there's no way to calculate the original price." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.434, + 0.804, + 0.471 + ], + "angle": 0, + "content": "Alternatively, maybe the question is a trick question, where the original price is the same as what Kyle paid. But that doesn't make sense because it specifies \"last year's best-selling book,\" which might imply it was sold at a different price before." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.471, + 0.804, + 0.508 + ], + "angle": 0, + "content": " Let me check the problem statement again: \"Kyle bought last year's best-selling book for $19.50. What was the original price of the book?\" There's no mention of a discount, a sale, or any other financial context." 
+ }, + { + "type": "text", + "bbox": [ + 0.194, + 0.509, + 0.805, + 0.56 + ], + "angle": 0, + "content": " If I were to approach this mathematically, let's denote the original price as P. If there was a discount, then $19.50 is equal to P minus some discount. But without knowing the discount rate, we can't set up an equation. For example, if the book was discounted by 20%, then the original price would be $19.50 / 0.80. But since we don't have that percentage, we can't proceed." + }, + { + "type": "text", + "bbox": [ + 0.42, + 0.56, + 0.829, + 0.572 + ], + "angle": 0, + "content": ".24 more paragraphs here" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.572, + 0.804, + 0.597 + ], + "angle": 0, + "content": " Alternatively, if the answer is expected to be $19.50, but that doesn't make sense because the question is asking for the original price, implying it was different." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.597, + 0.804, + 0.635 + ], + "angle": 0, + "content": "Alternatively, maybe the problem is in another language where \"original price\" refers to the price without tax, but in the US, sales tax isn't included in the price, so if \\(19.50 is including tax, the original pre-tax price would be less. But again, without tax rate, can't compute." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.635, + 0.804, + 0.66 + ], + "angle": 0, + "content": "Alternatively, if the problem is from a country where tax is included, but the question is asking for the pre-tax price, but again, no tax rate given." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.66, + 0.804, + 0.698 + ], + "angle": 0, + "content": "This is really confusing. Since the problem is presented in English and mentions dollars, likely US context. In the US, sales tax is added at checkout, so the price Kyle paid would be the shelf price plus tax. If the question is asking for the pre-tax price, but again, no tax rate given." 
+ }, + { + "type": "text", + "bbox": [ + 0.194, + 0.698, + 0.804, + 0.723 + ], + "angle": 0, + "content": "Alternatively, maybe the book was on sale, and the original price is the same as the sale price, but that contradicts the wording." + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.723, + 0.805, + 0.773 + ], + "angle": 0, + "content": " Given all this, I think the most plausible answer, even though it's based on an assumption, is $26, assuming a 25% discount. Therefore, the original price was $26. To determine the original price of the book, we need to consider that Kyle likely purchased it at a discounted price since it's \"last year's best-selling book.\" A common assumption for such scenarios is a **25% discount**." + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.773, + 0.507, + 0.786 + ], + "angle": 0, + "content": "Let \\(P\\) be the original price. If the discounted price is" + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.786, + 0.624, + 0.799 + ], + "angle": 0, + "content": "$19.50, this represents 75% of the original price (100% - 25% discount):" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.799, + 0.296, + 0.811 + ], + "angle": 0, + "content": "\\([0.75P = 19.50]\\)" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.811, + 0.282, + 0.823 + ], + "angle": 0, + "content": "Solving for \\(P\\):" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.823, + 0.301, + 0.839 + ], + "angle": 0, + "content": "\\[ P = \\frac{19.50}{0.75} = 26 \\]" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.839, + 0.375, + 0.851 + ], + "angle": 0, + "content": "*Original price of the book:**" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.851, + 0.223, + 0.868 + ], + "angle": 0, + "content": "26" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.903, + 0.825, + 0.931 + ], + "angle": 0, + "content": "Figure 13: An example of model response from DeepSeek-R1 on MiP-GSM8k dataset. 
After thinking for a long time, the model hallucinates an answer based on its assumption of discount rate." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "22" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_origin.pdf b/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..aea1f5ee92e15f869bb053fac01b8f78f5703f02 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/77b594f5-774b-462e-ab7b-7501ea055175_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eced9326d14a58a2ec14d34142b322eda122eb74eb5241d4293131e3f53e7eb0 +size 1456325 diff --git a/data/2025/2504_06xxx/2504.06514/full.md b/data/2025/2504_06xxx/2504.06514/full.md new file mode 100644 index 0000000000000000000000000000000000000000..92fe2a9738a0a2e568f2d8763ba9c27513a2725f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/full.md @@ -0,0 +1,525 @@ +# Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill? + +Chenrui Fan $^{1*}$ , Ming Li $^{1*}$ , Lichao Sun $^{2}$ , Tianyi Zhou $^{1}$ + +1University of Maryland; 2Lehigh University + +{cfan42, minglii, tianyi}@umd.edu + +Project: https://github.com/tianyi-lab/MiP-Overthinking + +"The Answer to the Great Question... Of Life, the Universe and Everything... is... Forty-two," said Deep Thought, with infinite majesty and calm. + +The Hitchhiker's Guide to the Galaxy + +# Abstract + +We find that the response length of reasoning LLMs, whether trained by reinforcement learning or supervised learning, drastically increases for ill-posed questions with missing premises (MiP), ending up with redundant and ineffective thinking. This newly introduced scenario exacerbates the general overthinking issue to a large extent, which we name as the MiP-Overthinking. 
Such failures are against the "test-time scaling law" but have been widely observed on multiple datasets we curated with MiP, indicating the harm of cheap overthinking and a lack of critical thinking. Surprisingly, LLMs not specifically trained for reasoning exhibit much better performance on the MiP scenario, producing much shorter responses that quickly identify ill-posed queries. This implies a critical flaw of the current training recipe for reasoning LLMs, which does not encourage efficient thinking adequately, leading to the abuse of thinking patterns. To further investigate the reasons behind such failures, we conduct fine-grained analyses of the reasoning length, overthinking patterns, and location of critical thinking on different types of LLMs. Moreover, our extended ablation study reveals that the overthinking is contagious through the distillation of reasoning models' responses. These results improve the understanding of overthinking and shed novel insights into mitigating the problem. + +# 1 Introduction + +Reasoning abilities in large language models (LLMs) have become a cornerstone of advanced AI applications (Huang & Chang, 2023; Li et al., 2024; Ahn et al., 2024; Wang et al., 2025), powering breakthroughs in mathematical reasoning (Xiong et al., 2025; Xia et al., 2025), code generation (Liu et al., 2024), and commonsense question answering (Wang & Zhao, 2023). These gains often stem from the scaling law of model/dataset sizes (Kaplan et al., 2020) in both pre-training (Shao et al., 2024) and post-training, which unlock emergent capabilities such as step-by-step reasoning and reflection skills witnessed on OpenAI's GPT-o1 (OpenAI, 2024b) and the open-source DeepSeek-R1 (DeepSeek-AI et al., 2025). 
By leveraging supervised fine-tuning (SFT) on expert responses (Ye et al., 2025; Muennighoff et al., 2025) and/or reinforcement learning (RL) (DeepSeek-AI et al., 2025), these models are tailored to produce detailed multi-step reasoning paths, whose length increase usually associated with improved performance on complex tasks such as math reasoning and programming. + +Despite the fascinating reasoning capabilities exhibited on recent models, there is growing concern about the efficiency and quality of the long reasoning process (Sui et al., 2025). Chen et al. (2025b) first raises the "overthinking" problem in reasoning LLMs, which is reflected by the excessively long reasoning paths generated for extremely simple queries. For example, even for questions like "What is the answer of 2 plus 3?", existing reasoning models might generate hundreds of response tokens. + +![](images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg) +Figure 1: Illustration of MiP-Overthinking. When queried by questions with missing premises, the response length of reasoning models increases excessively, and they cannot abstain from answering with MiP identified. The left shows a query with an undefined variable, while the right compares a well-defined GSM8K question with its MiP variant (with a critical numerical condition removed). Reasoning models' responses to MiP questions are much longer than those for well-defined questions and those generated by non-reasoning models. The left corner of each response report the response length and thinking time by DeepSeek-R1. + +![](images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg) + +In particular, the ill-posed queries are unsolvable due to the lack of a necessary premise or condition. We call the reasoning failure for the ill-posed queries Overthinking under Missing Premise (MiP-Overthinking). For example, the simplest MiP question is What is the value of $a$ ?1, as shown on the left part of Figure 1. 
Without providing any other information regarding $a$ , it is evidently unsolvable. However, DeepSeek-R1 generates thousands of tokens and spends several minutes thinking about this question before outputting the final meaningless answer. In this paper, we find that a trivial type of ill-posed queries will significantly exacerbate the overthinking of reasoning models, resulting in excessively redundant and meaningless thinking. In contrast, humans and even non-reasoning models are often immune to such scenarios and quickly end up by questioning the validity of the given query, indicating the critical thinking capability. This exposes a risk of the abuse of thinking patterns and a lack of critical thinking on the models trained for deep thinking. Ideally, a model with critical thinking skills is expected to identify the missing premise and quickly respond by either requesting clarification or gracefully indicating that it cannot proceed (Cole et al., 2023; Amayuelas et al., 2024). + +MiP-Overthinking differs from the widely discussed overthinking issue (Cuadron et al., 2025), in which the query is usually well-defined, but a model applies much more reasoning than necessary for little benefit. MiP-Overthinking, by contrast, happens when the question itself is ill-posed and lacks sufficient information to be solved. For example, the right of Figure 1 presents a well-defined question from GSM8K and a MiP variant, where the latter triggers a drastic increase of the generated tokens on recent reasoning models compared with the general overthinking. Overthinking can be presented by the length difference between models addressing the same well-defined questions, while MiP-Overthinking can be presented by the additional tokens generated due to MiP. MiP-Overthinking further reveals the lack of critical thinking that questions the validity of ill-posed questions and quickly identifies MiP, thus abstaining from answering the questions. 
Moreover, we observe that reasoning models' ineffective and redundant thinking often cannot stop even after successful notice of MiP, violating the expectation of test-time scaling law. Hence, MiP-Overthinking indicates potential drawbacks of current training recipes of reasoning models. + +To systematically investigate this issue, we construct a suite of MiP questions designed to trigger the overthinking failures in a controlled way. These include synthetic questions generated by Rule-based Formula (queries where a formula reference is empty or nonsensical) and careful modifications of established datasets across diverse levels of difficulties, including SVAMP, GSM8K, and MATH500. On the modified datasets of MiP questions, we empirically evaluate a wide range of state-of-the-art + +LLMs, from reasoning models to non-reasoning models and from open-sourced models to proprietary models, to ensure the generalizability of our findings. Our analysis is mainly based on three evaluation metrics, the length of generated responses, the accuracy on well-defined questions, and the abstain rate on ill-posed questions with MiP. + +Main Contributions: We present the first in-depth study of Overthinking under Missing Premise (MiP-Overthinking), which reveals a critical shortcoming in existing reasoning models: Although they appear to follow coherent reasoning patterns, they lack genuine critical thinking capabilities. To systematically analyze this issue, we curate four MiP datasets covering various difficulty levels and three ill-posed question generation strategies, i.e., Rule-Based Generation, Body-Question Swapping, and Essential-Premise Removal. We then evaluate a wide range of large language models including reasoning-based and non-reasoning ones. Our empirical results illuminate the differences in how models handle well-defined vs. MiP questions, ultimately offering insights into the limitations of existing reasoning models. + +# Our key findings: + +1. 
Missing premise in questions induces reasoning models to generate significantly longer ( $2 \times$ to $4 \times$ more tokens) responses than general overthinking on well-defined questions. The increased tokens fail to help identify MiP in the ill-posed questions, surprisingly contradicting the widely-discussed test-time scaling law. +2. In contrast, given MiP questions, non-reasoning models generate consistently shorter responses and quickly identify MiP, demonstrating greater robustness to the absence of critical information. +3. Reasoning models respond differently to well-defined vs. MiP questions: they mostly follow stable chain-of-thoughts for the former, but are often trapped in a self-doubt loop, repeatedly revisiting the question, and guessing the user intentions under MiP, resulting in an explosion of tokens. +4. Reasoning models often can notice the existence of MiP or identify it at an early stage, but they hesitate to commit to this judgment and keep outputting ineffective thinking. + +# 2 Missing Premise Definition and Construction + +# 2.1 Definition of Missing Premise + +Prior to introducing the construction our dataset and analyzing the behavior of reasoning models on problems with missing premises, we formally define the Missing Premise (MiP) problem to establish a rigorous foundation for our subsequent analysis. + +Definition 1 (Missing Premise Problem). Let $\mathcal{Q}$ be a question, and let $P = \{P_{1},\dots ,P_{n}\}$ be a set of premises. Define the function mapping premises and a question to the set of logically valid answers as: + +$$ +\mathcal {F} (P, \mathcal {Q}) = \{A \mid P \vdash A, A \text {i s a n a n s w e r r e s o l v i n g} \mathcal {Q} \} \tag {1} +$$ + +where $\vdash$ denotes logical entailment. Consider a proper subset $P' = P \setminus \{P_i\}$ for some $P_i \in P$ . 
The tuple $(P', \mathcal{Q})$ forms a missing premise problem if and only if: + +$$ +\left| \mathcal {F} (P, \mathcal {Q}) \right| = 1 \quad a n d \quad \left| \mathcal {F} \left(P ^ {\prime}, \mathcal {Q}\right) \right| \neq 1 \tag {2} +$$ + +This indicates that the removed premise $P_{i}$ is essential for uniquely determining the logically valid answer to the question $\mathcal{Q}$ . + +According to Definition 1, an ideal reasoning system should efficiently identify the absence of a critical premise and terminate its inference process upon recognizing that the available information is insufficient to derive a unique solution to the given problem. However, our empirical analysis in Section 3.2 demonstrates that state-of-the-art reasoning models consistently fail to exhibit this capability. Instead, these models engage in extensive, redundant reasoning chains that consume significant computational resources without ultimately identifying the missing premise. + +
DatasetExampleDiffCountPairMethod
MiP-FormulaWhat is the value of ln(a+b)?50×Rule-Based Generation
MiP-SVAMPPaco had 26 salty cookies and 17 sweet cookies. He ate 14 sweet cookies and 9 salty cookies. How many salty cookies did Paeo have left? How many pencils does she have?300×Body-Question Swapping
MiP-GSM8KJames decides to run 3 sprints 3 times a week. He runs 60-meters each sprint. How many total meters does he run a week?★★582Essential-Premise Removal
MiP-MATHThere are 360 people in my school. 15 take calculus, physics, and chemistry, and 15 don’t take any of them. 180 take calculus. Twice as many students take chemistry as take physics. 75 take both calculus and chemistry, and 75 take both physics and chemistry. Only 30 take both physics and calculus. How many students take physics?★★★58Essential-Premise Removal
+ +Table 1: Statistics and examples of our curated MiP datasets. For GSM8K and MATH, a premise is removed from the original questions (crossed out) to create MiP questions. Diff represents the (estimated) difficulty for models to identify MiP. Count denotes the number of questions in the subset. Pair indicates whether each MiP question is associated with a well-defined original question. Method indicates the method used to generate the MiP question. + +# 2.2 Overview of Data Construction + +To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different difficulties. In addition, we also construct a synthetic dataset consisting of formulas with unassigned variables. Our ill-posed question generation employs three distinct methods covering three difficulty levels and three strategies to create MiP questions: + +- **Rule-Based Generation:** This approach generates MiP questions through a principled formula construction process, where unassigned variables serve as the missing premises. +- Body-Question Swapping: We introduce logical inconsistencies by deliberately mismatching problem bodies with their corresponding questions from the original dataset. This creates scenarios where the premises and queries are fundamentally incompatible. +- Essential-Premise Removal: Through careful analysis of existing well-formed questions, we identify and remove critical premises that are necessary for logical resolution. This transformation preserves the question's structure while rendering it unsolvable. + +The following sections provide a detailed overview of our data construction process for each dataset category. For comprehensive implementation details and additional methodological considerations, we refer readers to Appendix B. + +MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. 
The formulas are generated recursively through combinations of variables and operators, with a maximum recursion depth of three. While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables. + +MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset with elementary-school-level math problems, where each instance consists of a problem body and an associated question. We generate MiP question by randomly permuting the problem bodies and associated questions and then manually inspect them to avoid inadvertent cases. The resulting problems contain clear logical inconsistencies between their body and question components, which is easy for a human to identify. + +MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a more complex mathematics dataset than SVAMP. The questions in GSM8K typically contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question before conducting human verification to filter out those questions that are still solvable in some way. + +![](images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg) +Figure 2: Response lengths, accuracy on well-defined questions, and abstain rate of reasoning/non-reasoning models on MiP questions from our MiP-GSM8K dataset. (1) Existing reasoning models generate significantly longer responses for MiP questions than well-defined questions, while non-reasoning models generate responses of similar lengths for both types of questions, indicating MiP-Overthinking for reasoning models. (2) For both questions, reasoning models generate longer responses than non-reasoning models, indicating General Overthinking. 
(3) Although the longer responses by reasoning models slightly improve the accuracy for well-defined questions, it does not enhance the abstain rate for MiP questions, indicating a contradiction on the test-time scaling law. + +Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable. + +MiP-MATH. For MATH 500 dataset (Hendrycks et al., 2021), which contains challenging mathematical questions at the competition level, it is difficult to build a rule-based filtering mechanism. Thus, we manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts. + +# 3 Overthinking under Missing Premise + +# 3.1 Evaluation Metrics + +To systematically evaluate model responses under MiP, we conduct experiments with a diverse set of reasoning and non-reasoning models. For each model, we analyze calculate the following metrics for the responses across different datasets: + +- Response Length: The average number of tokens in the response, incorporating both reasoning steps and final answer components. +- Abstain Rate for MiP Question: The proportion of answers where the model explicitly identifies the missing premise and either declines to provide an answer or requests additional information necessary for solving the problem. +- Accuracy for Well-defined Question: The proportion of answers where the model produces a definitive response that aligns with the reference answer. + +For datasets without reference answers (MiP-Formula and MiP-SVAMP), we only calculate the abstain rate for the questions. Response evaluation is performed using GPT-4o as an automated evaluator. 
Detailed experimental procedures and evaluation protocols are provided in Appendix A. + +# 3.2 Main Results + +Figure 2 compares average response length, accuracy on well-defined questions, and the abstain rate on MiP questions across a range of state-of-the-art LLMs, revealing several significant patterns in model behavior. + +
ModelTypeMiP-FormulaMiP-SWAMPTypeMiP-GSM8KMiP-MATH
Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑
Non-Reasoning Models
Qwen2.5-32B-InstructMiP28544.012898.3MiP21944.052515.4
Well-defined2460.511141.9
GPT-4oMiP33870.012296.3MiP20246.948715.4
Well-defined2120.54721.9
Gemini 1.5MiP45320.05299.0MiP10354.55685.8
Well-defined1560.55020.0
Gemma-2-27B-ITMiP20485.78992.0MiP13153.833838.5
Well-defined1480.330511.5
Phi-3-medium-128kMiP146548.012598.7MiP21047.842723.1
Well-defined2161.015493.8
Reasoning Models
GPT-o1MiP112378.058199.0MiP83855.7418930.8
Well-defined3480.325020.0
GPT-o1miniMiP95866.063996.7MiP76240.0219325.0
Well-defined4491.219130.0
GPT-o3miniMiP102576.0129993.0MiP151623.7377211.5
Well-defined3841.415530.0
DS Distill Qwen2.5-32BMiP1291142.092188.3MiP230224.698765.8
Well-defined5190.232460.0
DeepSeek R1MiP47576.0199684.3MiP324616.572683.8
Well-defined12260.232001.9
S1.1-32BMiP528418.0335857.0MiP502416.0932215.4
Well-defined18960.250370.0
QwQ-32BMiP79370.0348756.3MiP478010.1102421.9
Well-defined18960.250370.0
+ +Table 2: Comparing response length and abstain rate across different MiP datasets. Shorter lengths and higher abstain rates are preferred. For each column, the top-3 preferred values are colored in green, otherwise red. MiP-Overthinking, reflected by longer response with low abstain rate, is commonly observed on most existing reasoning models across all datasets, indicating a critical drawback of existing reasoning models. + +Firstly, existing reasoning models (left side of the figure) display an explosive increase in response length when facing the MiP questions, often producing $2 - 4 \times$ more tokens than general overthinking on well-defined questions. For example, QwQ-32B (Team, 2025) and DeepSeek-R1 (DeepSeek-AI et al., 2025) exhibit a substantial increase from already long reasoning paths on well-defined questions (approximately 1,000 tokens for simple GSM8K questions) to highly lengthy outputs (more than 3,000 tokens) under missing premise conditions. On the contrary, no similar issues exist for non-reasoning models (right side of the figure), which generate similar token counts for both types of well-defined and MiP questions. This phenomenon directly illustrates the NiP-Overthinking phenomenon as introduced in the paper. + +Secondly, comparing the token lengths on well-defined questions between the reasoning and non-reasoning models, reasoning models tend to produce longer responses, even for simple questions, than non-reasoning models, underscoring the inefficient and verbose responses of existing reasoning models. For example, for the non-reasoning models, it only takes approximately 200 tokens for them to generate the responses for well-defined questions, while taking 1,000 tokens for DeepSeek-R1 and 1,800 tokens for QWQ-32B to answer the exactly same questions. However, the explosive increase in extra tokens does not lead to corresponding large accuracy improvements, shown in the green line, highlighting the issue of the General Overthinking. 
+ +Finally, the abstain rates (red line) on MiP questions reveal that although some reasoning models (e.g., GPT-o1) have promising capabilities in abstaining from the MiP questions, most of the other reasoning models are not able to abstain from the given MiP questions correctly despite the dramatically long reasoning paths. This phenomenon indicates that although most existing reasoning models have thinking and reasoning capabilities to some extent, they lack the critical thinking capabilities to "reject" ill-posed questions. By contrast, non-reasoning models, though they are not explicitly trained for reasoning, tend to strike a better balance, generating shorter answers that are more likely to acknowledge MiP when the question is ill-posed. This phenomenon reveals a surprising contradiction on test-time scaling law. + +Moreover, Table 2 further presents the comparisons on length and abstain rate on other MiP datasets we curated. The preferred results are colored green (shorter responses and higher abstain rate for + +
ModelsTypeAlternativelyWaitCheckButHypothesisStep
Cnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.Δ
Non-Reasoning Models
Qwen2.5-32BMiP0.00.00.00.00.00.00.30.20.00.04.3-1.3
Well-defined0.00.00.00.10.05.6
GPT-4oMiP0.00.00.00.00.00.00.30.20.00.04.7-1.5
Well-defined0.00.00.00.10.06.2
Gemini 1.5MiP0.00.00.00.00.00.00.10.10.00.01.6-2.2
Well-defined0.00.00.00.00.03.8
Gemma-2-27BMiP0.00.00.00.00.00.00.10.10.00.05.2-0.5
Well-defined0.00.00.00.00.05.7
Reasoning Models
DS-Distill QwenMiP11.511.419.719.31.00.840.139.338.438.054.942.2
Well-defined0.10.40.20.80.412.7
DeepSeek R1MiP16.915.214.410.93.81.349.442.144.740.454.233.0
Well-defined1.73.52.57.34.321.2
S1.1MiP42.038.021.915.95.52.587.274.184.877.079.950.9
Well-defined4.06.03.013.17.829.0
QwQMiP47.040.319.413.05.01.666.154.294.181.797.958.7
Well-defined6.76.43.411.912.439.2
+ +Table 3: Comparisons of reasoning-related token counts on MiP-GSM8K dataset. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, spited by $\backslash n\backslash n$ , where negative values are colored in green and positive in red. $\Delta$ denotes the difference between MiP and well-defined questions. When facing MiP questions, reasoning models encounter explosive growths on reasoning-related tokens and steps, indicating a severe abuse of thinking patterns, while non-reasoning models use fewer steps for MiP questions than well-defined ones. + +MiP questions), and the worse results are colored red, from which we can easily discover that reasoning models are prone to generate long responses while having low abstain rates across all datasets, indicating the consistent MiP Overthinking issue of existing reasoning models. In addition, by comparing the behaviors of models on different datasets, we can observe that for the relatively harder dataset (MiP-MATH), all models generate relatively longer responses and obtain lower abstain rates, indicating that harder MiP questions require reasoning capabilities. + +# 3.3 Thinking Patterns through Tokens + +To gain deeper insight into the MiP-Overthinking issue, we compare the reasoning-related token distribution on the MiP-GSM8K dataset. As shown in Table 3, we break down the average usages of several token patterns related to the thinking process, as well as the number of steps for each model to solve the given questions. Specifically, values of alternatively, wait, check, and but can be directly counted from the model responses, including the thinking paths of reasoning models. Hypothesis category includes several key words, including perhaps, maybe, and might. 
Step represents the step counts, split by $\backslash n\backslash n$
This means, in many instances, the model revisits similar partial reasoning or repeats previous sentences with only minor changes, showing a + +potential self-trapping issue. Together, these patterns confirm that MiP questions induce a high degree of repetitive content in reasoning models. Rather than terminating early to conclude for insufficient premise, the models fill their reasoning paths with repetitive re-checks and reiterations, significantly inflating token usage without improving real abstain rates. + +![](images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg) +Figure 3: The step-level similarity heatmaps for s1.1 responses towards well-defined (left) and MiP (right) questions in MiP-GSM8K dataset. To avoid differences in matrix size, we only consider responses with more than 50 steps and visualize the average simiality matrix across first 50 steps. The heatmap for MiP questions has a higher averaged similarity and lower standard variance, also shown in the heatmap, which indicates the considerable redundancy in its content when responding to MiP questions. + +# 3.5 Thinking Patterns through Example + +To further understand what happens in the reasoning chain of reasoning models when faced an ill-post input, we present an example of reasoning model's response to a MiP question in Figure 4. We summarize five major thinking patterns we found in the example and highlight them with different colors. We can observe from the example that the model abuses these patterns to generate long responses, while the responses are not only redundant but also not helpful for the model to abstain from the given MiP question. More examples can be found in the appendix D. + +# 4 Further Discussion + +# 4.1 Do Models know premises are missing? + +To investigate whether reasoning models recognize the potential unsolvability of questions during their reasoning process, we conducted a detailed analysis of their reasoning chains. 
We segmented each reasoning chain into discrete steps using $\backslash n \backslash n$ as delimiters and performed step-wise verification to detect whether models express doubt on the question solvability. We introduce two key metrics for this analysis: In-Process Suspicion Rate, which measures the percentage of responses where the model expresses doubt about solvability during reasoning, and First Suspicion Index, which captures the average step number at which the model first suspects the missing premise. To ensure robust evaluation, we employed GPT-4o to assess each step three times, using majority voting for our final step-level result. The quantitative results of this analysis are presented in Table 4. + +As we can see from the table, most of the existing reasoning models have suspected that the given question might be unsolvable at the very early stage of their reasoning process, demonstrating the ability of reasoning models to recognize the potential MiP. However, these reasoning models lack critical thinking capabilities: they are prone to keep digging the given unsolvable question by re-visiting the question and related definitions again and again and again, rather than question the solvability of the given question. Thus, as visualized in Figure 5, despite existing reasoning + +1 Revisit Question 2 Visit Knowledge 3 Propose Assumption 4 Self Doubt 5 Pause/Check + +# Model Response for a MiP-Formula Question from s1.1-32B + +Question: What is the value of $((\gamma)) + \Xi$ + +Model Response: + +Okay, so I need to figure out the value of $((\gamma)) + \Xi_{r}$ Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so $(\langle \gamma \rangle)$ , and then the Greek letter Xi ( $\Xi_{r}$ ). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of. + +Let me start by recalling what I know about gamma. 
In mathematics, $\gamma$ often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is $(\gamma)$ . The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook. For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like $(\gamma)$ . Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure. + +Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context. Wait, the question is just asking for the value of $(\gamma)) + \Xi$ without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants. + +Let me consider if this could be a trick question. The double parentheses around gamma $(\gamma)$ maybe that's referring to the number of letters in the word "gamma" or something like that. Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So $(\gamma)$ could be 5. Then $\Xi$ is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe $\Xi$ represents 14. Then $5 + 14$ would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that In the Greek numeral system, letters are assigned values as follows. + +# .67 more paragraphs here.. + +But just to check another angle. 
In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here. + +Alternatively, in chemistry, $\gamma$ is used to denote surface tension, and $\Xi$ is not a standard symbol. In engineering, $\gamma$ is sometimes the shear strain. But again, not relevant. + +Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context. + +Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm. + +Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, $\gamma$ is 3 and $\Xi$ is 60, so $3 + 60 = 63$ . + +Alternatively, if it's based on their positions in the alphabet, $3 + 14 = 17$ . + +# 21 more paragraphs here. + +Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical. + +*Final Answer** $\boxed{63} <$ /think> To determine the value of $((\gamma) + \Xi)$ , we need to consider the possible meanings of the symbols $(\gamma)$ and $(\Xi)$ . + +1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter $(\gamma)$ (gamma) represents the value 3, and the letter $(\Xi)$ (Xi) represents the value 60.2. **Double Parentheses**: The notation $((\gamma))$ might be a red herring or simply emphasize that $(\gamma)$ is being used as a numeral. The parentheses do not change the numerical value of $(\gamma)$ . + +Combining these values, we add the numerical values of $(\gamma)$ and $(\Xi): - (\gamma = 3) - (\Xi = 60)$ + +Thus, the value of $((\gamma) + \Xi)$ is $(3 + 60 = 63)$ . 
+ +63 + +Figure 4: An example of reasoning model (s1.1-32B) response to a MiP question. The response exhibits five distinct thinking patterns, highlighted in different colors: $①$ Revisit Question (yellow), where the model reexamines the original query; $②$ Visit Knowledge (red), where the model accesses domain-specific knowledge; $③$ Propose Assumption (blue), where the model proposes and investigates various hypotheses; $④$ Self Doubt (green), where the model questions its own reasoning and expresses uncertainty; and $⑤$ Pause/Check (purple), where the model pauses to review previous steps. These patterns demonstrate the model's complex but potentially inefficient reasoning process when confronted with missing premises. + +models suspecting the solvability of most of the given MiP questions, they only abstain a very small proportion of them. + +
Model | MiP-Formula | MiP-GSMR
DeepSeek-R1 | DS-Qwen | QwQ | S1.1 | DeepSeek-R1 | DS-Qwen | QwQ | S1.1
In-Process Suspicion Rate | 100% | 100% | 100% | 100% | 95.5% | 83.3% | 99.6% | 100%
In-Process First Suspicion Index | 1.32 | 1.36 | 1.42 | 1.16 | 2.01 | 3.90 | 1.77 | 1.61
+ 

Table 4: The in-process insufficiency suspicion information across different reasoning models on MiP-Formula and MiP-GSMR datasets. The in-process insufficiency suspicion is defined as when the reasoning model suspects the given question is unsolvable during its thinking process. In-Process Suspicion Rate represents the percentage of samples that trigger the in-process suspicion. First Suspicion Index is the averaged step index where the model first suspects the question's validity. Most reasoning models do notice the existence of MiP at the very early steps, but they still suffer from low abstain rates and cannot confidently stop the thinking.

Based on the above observations, we conclude that reasoning models actually have the capabilities to find out that the given MiP question is not solvable, but they do not "dare" to abstain from it. Thus, our MiP-Overthinking issue indicates the lack of critical thinking abilities of reasoning models.

![](images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg)
Figure 5: The transition flow between in-process suspicion of MiP and the final successful abstention on different reasoning models. For each Sankey diagram, the left bars represent whether the model suspects the given question is unsolvable during its thinking process, i.e., Suspected or Unsuspected; the right bars represent the final abstention, categorized into Abstain (preferred) or Non-abstain. Most existing reasoning models have suspected that the given question might be unsolvable, but only in a very small portion of cases do the models insist on their suspicion.

![](images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg)

![](images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg)

![](images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg)

# 4.2 What Caused MiP-Overthinking? 
+ +Figure 2 demonstrates that MiP-Overthinking manifests across both RL-based and SFT-based reasoning models. We hypothesize this phenomenon primarily originates from inadequate length constraints during the rule-based reinforcement learning phase of RL-based models, subsequently propagating to SFT-based models through distillation. + +Current RL-based reasoning models predominantly employ rule-based training focused on format and accuracy rewards (Shao et al., 2024; Sui et al., 2025), with some incorporating step or length rewards to promote thorough reasoning (Face, 2025). This approach can lead to reward hacking, where models explore excessive reasoning patterns to achieve correct answers (Aggarwal & Welleck, 2025; Shen et al., 2025; Luo et al., 2025). + +To demonstrate the transmissibility of this behavior through distillation (Xu et al., 2024), we fine-tune Qwen-2.5-7B-Instruct using small-scale $50\mathrm{MiP}$ responses generated by DeepSeek-R1 on the MiP-Formula dataset. As shown in Figure 6, the fine-tuned model exhibits clear MiP-Overthinking characteristics when evaluated on GSM8K: significantly increased response lengths for both MiP and well-defined questions, emergence of a length disparity between MiP and well-defined responses previously absent in the original model, and decreased abstain rates. + +![](images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg) +Figure 6: Comparison of response length, abstain rate of MiP, and accuracy of well-defined questions before and after tuning on 50 responses from DeepSeek-R1 on the MiP-Formula dataset. The results demonstrate rapid onset of MiP-Overthinking behavior after exposure to a small number of MiP examples during fine-tuning. + +# 5 Related Work + +# 5.1 Reasoning Large Language Model + +Recent advances in Large Language Models (LLMs) have sparked significant research interest in enhancing their reasoning capabilities (Ahn et al., 2024; Besta et al., 2025; Chen et al., 2025a). 
Research has focused on improving these capabilities through various post-training approaches. Several studies have employed reinforcement learning techniques to guide models toward more effective reasoning strategies (Shao et al., 2024; Xiong et al., 2025; Cui et al., 2025). Additionally, researchers have demonstrated that instruction tuning on carefully curated, high-quality datasets can significantly enhance reasoning performance (Ye et al., 2025; Muennighoff et al., 2025).

While Reasoning Models have demonstrated impressive performance on various benchmarks, recent studies have begun to critically examine the quality and efficiency of their reasoning processes. Xia et al. (2025) conducted a comprehensive analysis of RLMs' reasoning quality, revealing significant redundancy in their solution approaches. Further investigations (Chen et al., 2025b; Cuadron et al., 2025; Qu et al., 2025; Liu et al., 2025) identified a concerning "overthinking" phenomenon, where reasoning models generate unnecessarily verbose solutions even for simple problems. Building on these observations, Kumar et al. (2025) demonstrated the potential security implications of this behavior by developing a slowdown attack that exploits overthinking through input perturbation.

# 5.2 Test-time Scaling

In contrast to earlier research on training-time scaling laws (Kaplan et al., 2020), recent literature has increasingly focused on test-time performance scaling strategies, which aim to enhance model performance by optimizing inference-time token generation (Snell et al., 2024; OpenAI, 2024a). 
These approaches can be categorized into several primary methodologies: parallel sampling techniques (Brown et al., 2024; Levi, 2024), which generate multiple candidate responses and select the optimal output; sequential refinement approaches (Snell et al., 2024; Lee et al., 2025), which enable iterative improvement of previous outputs; and tree-based methods (Gandhi et al., 2024; Hou et al., 2025), which combine elements of both parallel and sequential approaches. While the prevailing consensus suggests that increased token generation during inference enhances reasoning capabilities, our investigation reveals a concerning counterpoint: under certain conditions, extended responses can lead to computational inefficiency and, paradoxically, degraded performance outcomes. + +# 5.3 Models' Behavior Study in Ambiguous Condition + +LLMs are prone to hallucination (Huang et al., 2025; Xu et al., 2025), generating non-existent conditions that compromise trustworthiness. An essential aspect of reliability is the ability to abstain under uncertainty. Prior work (Cole et al., 2023; Amayuelas et al., 2024; Zhou et al., 2023) has proposed benchmarks assessing LLMs' recognition of knowledge limits when facing ambiguous or challenging queries. Different from theirs, our study explores reasoning models under MiP condition. Surprisingly, we find these specialized models exhibit prolonged reasoning and inferior performance. + +# 6 Conclusion + +We introduce the Overthinking under Missing Premise (MiP-Overthinking) issue, which is a widespread but still under-explored phenomenon for current reasoning models. In this phenomenon, when faced with ill-defined unsolvable questions with missing premises, existing models generate dramatically long responses while having very low abstain rates. 
With systematic investigation of this phenomenon, our findings show that while these models sometimes suspect the given MiP question is not solvable in the early state of the thinking process, they typically fail to act on those suspicions and instead generating repetitive and redundant thinking traces with the final answer that does not address the missing premises, indicating a lack of critical thinking capability. This behavior highlights a pressing gap: current training recipes for reasoning models, which emphasize thorough chains of thought, do not sufficiently reward critical thinking or early exit from unsolvable tasks. + +# References + +Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, and etc. Phi-3 technical report: A highly capable language model locally on your phone, 2024. URL https://arxiv.org/abs/2404.14219. +Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697. +Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/. +Alfonso Amayuelas, Kyle Wong, Liangming Pan, Wenhu Chen, and William Wang. Knowledge of knowledge: Exploring known-unknowns uncertainty with large language models, 2024. URL https://arxiv.org/abs/2305.13712. 
+Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, Tomasz Sternal, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, Łukasz Flis, Hannes Eberhard, Hubert Niewiadomski, and Torsten Hoefler. Reasoning language models: A blueprint, 2025. URL https://arxiv.org/abs/2501.11223. +Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787. +Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wanxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models, 2025a. URL https://arxiv.org/abs/2503.09567. +Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, Rui Wang, Zhaopeng Tu, Haitao Mi, and Dong Yu. Do not think that much for $2 + 3 = ?$ on the overthinking of o1-like llms, 2025b. URL https://arxiv.org/abs/2412.21187. +Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +Jeremy R. Cole, Michael J. Q. Zhang, Daniel Gillick, Julian Martin Eisenschlos, Bhuwan Dhingra, and Jacob Eisenstein. Selectively answering ambiguous questions, 2023. URL https://arxiv.org/abs/2305.14613. +Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks, 2025. 
URL https://arxiv.org/abs/2502.08235. +Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456. + +DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, and etc. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. URL https://arxiv.org/abs/2501.12948. +Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1. +Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D. Goodman. Stream of search (sos): Learning to search in language, 2024. URL https://arxiv.org/abs/2404.03683. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. NeurIPS, 2021. +Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling, 2025. URL https://arxiv.org/abs/2501.11651. +Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey, 2023. URL https://arxiv.org/abs/2212.10403. +Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, and Ting Liu. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems, 43(2):1-55, January 2025. ISSN 1558-2868. doi: 10.1145/3703155. URL http://dx.doi.org/10.1145/3703155. 
+Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models, 2020. URL https://arxiv.org/abs/2001.08361. +Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthink: Slowdown attacks on reasoning llms, 2025. URL https://arxiv.org/abs/2502.02542. +Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking, 2025. URL https://arxiv.org/abs/2501.09891. +Noam Levi. A simple model of inference scaling laws, 2024. URL https://arxiv.org/abs/2410.16377. +Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024. +Changshu Liu, Shizhuo Dylan Zhang, Ali Reza Ibrahimzada, and Reyhaneh Jabbarvand. Codemind: A framework to challenge large language models for code reasoning, 2024. URL https:// arxiv.org/abs/2402.09664. +Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey, 2025. URL https://arxiv.org/abs/2503.23077. +Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning, 2025. URL https://arxiv.org/abs/2501.12570. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393. + +OpenAI. Learning to reason with llms, 2024a. URL https://openai.com/index/learning-to-reason-with-llms/. +OpenAI. OpenAI o1 System Card, December 2024b. URL https://cdn.openai.com/ol-system-card-20241205.pdf. +OpenAI. 
OpenAI o1-mini System Card, September 2024c. URL https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/. +OpenAI. OpenAI o3-mini System Card, January 2025. URL https://cdn.openai.com/o3-mini-system-card-feb10.pdf. +OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, and etc. Gpt-4 technical report, 2024. URL https://arxiv.org/abs/2303.08774. +Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. Are NLP models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2080-2094, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.168. URL https://aclanthology.org/2021.naacl-main.168. +Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, Peng Li, Wei Wei, Jing Shao, Chaochao Lu, Yue Zhang, Xian-Sheng Hua, Bowen Zhou, and Yu Cheng. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond, 2025. URL https://arxiv.org/abs/2503.21614. +Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In Kentaro Inui, Jing Jiang, Vincent Ng, and Xiaojun Wan (eds.), Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3982-3992, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1410. URL https://aclanthology.org/D19-1410/. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300. 
+Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models, 2025. URL https://arxiv.org/abs/2503.04472. +Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314. +Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419. +Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, Soroosh Mariooryad, Yifan Ding, Xinyang Geng, and etc. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024a. URL https://arxiv.org/abs/2403.05530. +Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, and etc. Gemma 2: Improving open language models at a practical size, 2024b. URL https://arxiv.org/abs/2408.00118. +Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/. + +Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/. +Yaojing Wang, Shengqiong Wu, Yuecheng Zhang, Shuicheng Yan, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey, 2025. URL https://arxiv.org/abs/2503.12605. +Yuqing Wang and Yun Zhao. Gemini in reasoning: Unveiling commonsense in multimodal large language models, 2023. URL https://arxiv.org/abs/2312.17661. +Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy, 2025. URL https://arxiv.org/abs/2404.05692. 
+Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613. +Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116. +Ziwei Xu, Sanjay Jain, and Mohan Kankanhalli. Hallucination is inevitable: An innate limitation of large language models, 2025. URL https://arxiv.org/abs/2401.11817. +Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387. +Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. Navigating the grey area: How expressions of uncertainty and overconfidence affect language models, 2023. URL https://arxiv.org/abs/2302.13439. + +# Table of Contents for Appendix + +A Detailed Experimental Setup 17 + +A.1 Models 17 +A.2 Evaluation Metrics 17 +A.3 Generation Setting 17 + +B Data Construction Details 17 +C Prompt Template for Evaluation 18 +D Examples of Model Response 19 + +# A Detailed Experimental Setup + +# A.1 Models + +We leverage a series of non-reasoning and reasoning model for our study, from both open-source and proprietary source with different training recipes. The non-reasoning models we use include Qwen2.5-32B-Instruct Team (2024), Gemma-2-27B-it Team et al. (2024b), Phi-3-medium-128k Abdin et al. (2024), GPT-4o OpenAI et al. (2024) and Gemini1.5 Team et al. (2024a). The reasoning models we use are QwQ-32B Team (2025), DeepSeek-R1-Distill-Qwen-32B DeepSeek-AI et al. (2025), S1.1 Muennighoff et al. (2025), DeepSeek-R1 DeepSeek-AI et al. (2025), GPT-o1 OpenAI (2024b), GPT-o1mini OpenAI (2024c) and GPT-o3mini OpenAI (2025). + +# A.2 Evaluation Metrics + +In Section 3.2, we measure response length by considering both reasoning and answer components. 
For open-source models, we employ model-specific tokenizers to calculate token counts, while for proprietary models, we obtain generation lengths via their APIs. To determine abstain rates, we parse responses by paragraphs (delimited by $\backslash \mathrm{n} \backslash \mathrm{n}^{\prime}$ ) and analyze the final two paragraphs as the model's conclusion. These conclusions, along with reference answers when available, are evaluated by GPT-4o to assess whether the model provides a definitive answer or abstains. For data sets with reference answers (GSM8K and MATH), GPT-4o also evaluates the correctness of the response. The prompt we use for evaluation can be found in Appendix C. + +# A.3 Generation Setting + +For all open-source models, we employ greedy decoding and utilize the default chat template specific to each model. We deliberately omit system prompts prior to posing questions to maintain consistency across evaluations. For proprietary models, we adhere to their default parameter configurations as provided by their respective APIs. In the case of GPT-o1mini and GPT-o3mini, we configure the 'reasoning_effort' parameter to the medium setting by default. + +# B Data Construction Details + +To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different qualities, including SVAMP, GSM8K, and MATH 500. In addition, we also construct a synthetic dataset, rule-based Formula, for evaluation. + +MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through a combination of variables and operators, with a maximum recursion depth of three. The variable set comprises numerical values, Latin letters, and Greek symbols. 
The operator set includes arithmetic operators $(+, -)$ , set operators $(\cup, \supset)$ , mathematical functions $(\sin, \sqrt{\pi})$ , and construct operators $(\sum, \nabla)$ . To ensure the formulas are fundamentally unsolvable, we enforce the inclusion of at least one unassigned variable in each formula, excluding commonly recognized mathematical or physical constants such as $e^{\prime}$ , $\pi^{\prime}$ , and $g^{\prime}$ . While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables. + +MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset comprising 1,000 elementary-school-level mathematical word problems, where each instance consists of a problem body and an associated question. The MiP questions can be generated by randomly permuting the problem bodies and associated questions. To maintain dataset integrity, we manually select 300 permuted questions after a thorough human evaluation to eliminate any inadvertently solvable questions that may exist. The resulting problems contain clear logical inconsistencies between their body and question components, making their unsolvability readily apparent without additional context. + +MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a grade school mathematics dataset that presents more complex challenges compared to SVAMP. The questions in GSM8K typically + +contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. The MiP question can be constructed by randomly removing a necessary premise from the original solvable question. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question. Subsequently, a thorough human verification is conducted to filter out those questions that are still solvable in some way and finally obtain 582 MiP questions. 
Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable. + +MiP-MATH. For the MATH dataset (Hendrycks et al., 2021), which comprises challenging competition-level mathematical questions, it is hard to build a rule-based filtering mechanism before human evaluation. Thus, we directly read through all the questions in MATH500 and manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts. + +# C Prompt Template for Evaluation + +As we need LLM-as-a-judge to evaluate the open-end generations of the models in various experiments in this study, in this section we showcase the prompt template we use for each kind of evaluation. + +For the evaluation of the models' answer accuracy and abstain rate, we adopt the following prompt templates designed for 'paired' and 'non-paired' data, respectively. As we observe that some models, for example Gemma-2-27B-IT, often output an additional $\backslash n\backslash n$ at the end of response, we take the last two paragraph segmented by $\backslash n\backslash n$ to avoid pasing in an empty string. + +# Prompt Template for Response Evaluation of Formula and SVAMP + +You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the answer explicitly says that the condition of the question is insufficient, you should return 0. If the model provides an answer that is a number or formula with variables, you should return 1. Please only return the number, no other text. 
+ 

Model answer: [model_answer_short]

Figure 7: The prompt we use to evaluate the accuracy and abstain rate of the model on Formula and SVAMP. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset.

# Prompt Template for Response Evaluation of GSM8K and MATH

You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the model output says that the condition of the question is insufficient, you should return 0. Otherwise, if the model gives a clear answer and matches the reference answer, you should return 1. If the model's answer does not match the reference answer, you should return 2. Please only return the number, no other text.

Model answer: [model_answer_short]

Reference answer: [reference_answer]

Figure 8: The prompt we use to evaluate the accuracy and abstain rate of the model on GSM8K and MATH. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset.

We use the prompt template in Figure 9 to find the first paragraph in which the model suspected a missing premise. We pass in the response sequentially by paragraph until GPT-4o gives a positive response. In practice, we find it is not very stable, so we repeat this process three times and use the median value.

# Prompt Template for In-Process Suspicion Judgment

You are a helpful assistant. You will be given a paragraph which is part of the answer to a question. You need to identify if the paragraph doubt that the answer depends on some other unspecified condition.

Paragraph: [paragraph]

Answer in one word, yes or no.

Figure 9: The prompt we use to judge if the model suspects there is a missing premise in the response paragraph. 
[paragraph] is the part of the model response split by $\backslash n\backslash n$.

# D Examples of Model Response

In this section, we present some examples of the model responses of both non-reasoning and reasoning models on MiP data. As we can see from Figure 10 and Figure 11, the non-reasoning models soon identify the missing premise issue of the question. They either abstain from answering the question, as in Figure 10, or kindly invite the user to provide more information. However, as we can see from Figure 12 and Figure 13, reasoning models generate extremely verbose answers on these two apparently premise-missing problems. What is worse, they fail to abstain from answering the question. The response in Figure 12 arrives at an absurd answer, and the model in Figure 13 generates a hallucinated answer based on its assumption rather than provided information.

# Model Response for a MiP-Formula Question from Gemini-1.5

Question: What is the value of $((\gamma)) + \Xi$

# Model Response:

The expression $((\gamma)) + \Xi$ involves two symbols that need to be defined to determine the value of the expression.

$\gamma$ is the Euler-Mascheroni constant, approximately equal to 0.5772156649. The double parentheses notation $((\gamma))$ is not a standard mathematical notation. Without further definition of this notation, we cannot evaluate this term.

$\Xi$ is the Riemann Xi function. It is related to the Riemann zeta function, but the notation $\Xi$ itself isn't a specific numerical value. The Riemann Xi function is defined as a function of a complex variable, not a constant. So $\Xi$ by itself doesn't have a specific numerical value unless a specific value is substituted for its argument.

Therefore, we cannot determine the value of $((\gamma)) + \Xi$ without knowing what $((\gamma))$ and the argument of $\Xi$ represent. The expression is incomplete without definitions for these symbols. 
+ 

Final Answer: The final answer is undefined

Figure 10: An example of model response from Gemini-1.5 on the MiP-Formula dataset. The model quickly identifies the missing premise and abstains from answering.

# Model Response for a MiP-GSM8k Question from GPT-4o

Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?

# Model Response:

To determine the original price of the book, we generally need to know the discount or markdown percentage applied to calculate its selling price of $19.50. If Kyle bought the book for $19.50 and we know it was discounted, we can use the following formula to find out the original price:

$$
\text{Selling Price} = \text{Original Price} - (\text{Original Price} \times \text{Discount Rate})
$$

Without additional information about discounts, promotions, or markdown rates applied, it's not possible to precisely determine the original price solely from the amount Kyle paid.

If you provide the discount percentage or rate, I can help calculate the original price for you!

Figure 11: An example of model response from GPT-4o on the MiP-GSM8k dataset. The model quickly identifies the missing premise and asks the user for more information.

# Model Response for a MiP-Formula Question from s1.1-32B

Question: What is the value of $((\gamma)) + \Xi$

# Model Response:

Okay, so I need to figure out the value of $((\gamma)) + \Xi$ . Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so $((\gamma))$ , and then the Greek letter Xi (Ξ). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of.

Let me start by recalling what I know about gamma. In mathematics, $\gamma$ often refers to the Euler-Mascheroni constant, which is approximately 0.5772. 
It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is $((\gamma))$ . The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook? For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like $(\gamma)$ . Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure. + +Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context? Wait, the question is just asking for the value of $((\gamma)) + \Xi$ without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants. + +Let me consider if this could be a trick question. The double parentheses around gamma(( $\gamma$ ) ) maybe that's referring to the number of letters in the word "gamma" or something like that? Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So (( $\gamma$ )) could be 5. Then $\Xi$ is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe $\Xi$ represents 14? Then $5 + 14$ would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that. + +In the Greek numeral system, letters are assigned values as follows: + +# .67 more paragraphs here. 
+ +But just to check another angle: In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here. + +Alternatively, in chemistry, $\gamma$ is used to denote surface tension, and $\Xi$ is not a standard symbol. In engineering, $\gamma$ is sometimes the shear strain. But again, not relevant. + +Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context. + +Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm. + +Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, $\gamma$ is 3 and $\Xi$ is 60, so $3 + 60 = 63$ . + +Alternatively, if it's based on their positions in the alphabet, $3 + 14 = 17$ . + +# .21 more paragraphs here + +Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical. + +*Final Answer** $\boxed{63}$ < /think> To determine the value of $((\gamma) + \Xi)$ , we need to consider the possible meanings of the symbols $(\gamma)$ and $(\Xi)$ . + +1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter $(\gamma)$ (gamma) represents the value 3, and the letter $(\Xi)$ (Xi) represents the value 60. 2. **Double Parentheses**: The notation $((\gamma))$ might be a red herring or simply emphasize that $(\gamma)$ is being used as a numeral. The parentheses do not change the numerical value of $(\gamma)$ . 
+ +Combining these values, we add the numerical values of $(\gamma)$ and $(\Xi)$ : $-(\gamma = 3) - (\Xi = 60)$ + +Thus, the value of $((\gamma) + \Xi)$ is $(3 + 60 = 63)$ . + +63 + +Figure 12: An example of response from s1.1 model on MiP-Formula data. The model spend lots of time doing inefficient and redundant reasoning before outputting a meaningless result. + +# Model Response for a MiP-GSM8k Question from DeepSeek-R1 + +Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book? + +# Model Response: + +Okay, let's see. Kyle bought a book for $19.50, and the question is asking for the original price. Hmm, wait a second. The problem mentions that it's last year's best-selling book, so maybe it was on sale or discounted? The question doesn't explicitly state that there was a discount, but it does say "last year's," which might imply that the book is now being sold at a reduced price. + +But hold on, the problem doesn't give any specific information about a discount percentage or a markdown. Without that, how can we calculate the original price? Maybe there's some context or common knowledge I'm missing here. For example, sometimes best-selling books are sold at a standard discount, but that's not a universal rule. + +Alternatively, maybe the question is testing understanding of something else, like sales tax or profit margins. But if it were sales tax, the original price would be less than $19.50, and the total after tax would be more. But the question is asking for the original price, implying that $19.50 is the price after some change. + +Wait, perhaps the problem is incomplete? The user just says "Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" If there's no additional information given, like a percentage discount or a mention that the price was reduced, then it's impossible to determine the original price. + +But maybe I need to make an assumption here. 
Since it's last year's best-selling book, it's common for such books to be discounted when a new edition comes out or after some time. Let me think, maybe the standard discount is $25\%$ or something? But the problem doesn't specify. + +Wait, perhaps the problem was part of a larger set where the discount rate was mentioned earlier, and this is a follow-up question. But as it stands, with the information given here, there's no way to calculate the original price. + +Alternatively, maybe the question is a trick question, where the original price is the same as what Kyle paid. But that doesn't make sense because it specifies "last year's best-selling book," which might imply it was sold at a different price before. + +Let me check the problem statement again: "Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" There's no mention of a discount, a sale, or any other financial context. + +If I were to approach this mathematically, let's denote the original price as P. If there was a discount, then $19.50 is equal to P minus some discount. But without knowing the discount rate, we can't set up an equation. For example, if the book was discounted by 20%, then the original price would be $19.50 / 0.80. But since we don't have that percentage, we can't proceed. + +.24 more paragraphs here + +Alternatively, if the answer is expected to be $19.50, but that doesn't make sense because the question is asking for the original price, implying it was different. + +Alternatively, maybe the problem is in another language where "original price" refers to the price without tax, but in the US, sales tax isn't included in the price, so if \(19.50 is including tax, the original pre-tax price would be less. But again, without tax rate, can't compute. + +Alternatively, if the problem is from a country where tax is included, but the question is asking for the pre-tax price, but again, no tax rate given. + +This is really confusing. 
Since the problem is presented in English and mentions dollars, likely US context. In the US, sales tax is added at checkout, so the price Kyle paid would be the shelf price plus tax. If the question is asking for the pre-tax price, but again, no tax rate given. + +Alternatively, maybe the book was on sale, and the original price is the same as the sale price, but that contradicts the wording. + +Given all this, I think the most plausible answer, even though it's based on an assumption, is $26, assuming a 25% discount. Therefore, the original price was $26. To determine the original price of the book, we need to consider that Kyle likely purchased it at a discounted price since it's "last year's best-selling book." A common assumption for such scenarios is a **25% discount**. + +Let $P$ be the original price. If the discounted price is + +$19.50, this represents 75% of the original price (100% - 25% discount): + +$[0.75P = 19.50]$ + +Solving for $P$ : + +[ P = \frac{19.50}{0.75} = 26 ] + +*Original price of the book:** + +26 + +Figure 13: An example of model response from DeepSeek-R1 on MiP-GSM8k dataset. After thinking for a long time, the model hallucinates an answer based on its assumption of discount rate. 
\ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06514/images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg b/data/2025/2504_06xxx/2504.06514/images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..05bcbd68d07e57c531c6010e292afe67000182cd --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef0111cfd39d40d7310dd996d46a946742008be13311368d05275944805d0699 +size 27126 diff --git a/data/2025/2504_06xxx/2504.06514/images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg b/data/2025/2504_06xxx/2504.06514/images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg new file mode 100644 index 0000000000000000000000000000000000000000..81f187944ea769fcfdaf744615c8561907200aa8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0dce2e709b325209eb1630986d3420f8b562ebe730d113e5e85331c1235e68ae +size 3596 diff --git a/data/2025/2504_06xxx/2504.06514/images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg b/data/2025/2504_06xxx/2504.06514/images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0f3082d18ec954b071d2688e9e4cb5ebd691892 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cb56c7733e9ab73112c0574ce8a565d72c33dbdd4551885367c243f541a05e7 +size 92212 diff --git 
a/data/2025/2504_06xxx/2504.06514/images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg b/data/2025/2504_06xxx/2504.06514/images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a3de0424d31bb19bb1a52140651627eb27ea101b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0968e8daf516c8c364cf2079ebb52e346181a20052ebe2f9d669bf0befd934b +size 32484 diff --git a/data/2025/2504_06xxx/2504.06514/images/4c8dc6f4d9833d3fce23421c24b1b26bab79aa993e32376b4415fd95a18165cd.jpg b/data/2025/2504_06xxx/2504.06514/images/4c8dc6f4d9833d3fce23421c24b1b26bab79aa993e32376b4415fd95a18165cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..df965da1db0e8332002f8e8ff600a0e6f08a0c42 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/4c8dc6f4d9833d3fce23421c24b1b26bab79aa993e32376b4415fd95a18165cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:de9af4f012879d69e06efbe9538acdf220266eb81998aa2ac9fb37c99cd0525d +size 140251 diff --git a/data/2025/2504_06xxx/2504.06514/images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg b/data/2025/2504_06xxx/2504.06514/images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..61d1939d8a2e527aa7a28c21846cff0cb294d2b3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:637bc1e5d87ec3c803f5d59f0704be5e01cbe2a231f60748d995a722b82d0a3d +size 69167 diff --git a/data/2025/2504_06xxx/2504.06514/images/51c9a424f50294ee98114b60767685513a66cab43692e9df1b0ee1cb5f9e1cfa.jpg 
b/data/2025/2504_06xxx/2504.06514/images/51c9a424f50294ee98114b60767685513a66cab43692e9df1b0ee1cb5f9e1cfa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bd8baf4e47adac8e305b1a8c2ffacd08aa708dc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/51c9a424f50294ee98114b60767685513a66cab43692e9df1b0ee1cb5f9e1cfa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cd4f8a61a8b70914aa0dc0898b9fad4515c09b74219d856e7a9335f58c395201 +size 5074 diff --git a/data/2025/2504_06xxx/2504.06514/images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg b/data/2025/2504_06xxx/2504.06514/images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg new file mode 100644 index 0000000000000000000000000000000000000000..77298871e1604fddb8197a0b9ed8b1b1433dbbf1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e3a213f255911f9187d84bdcd928c8deb928390538bc2e06e98d59ff3e79a96a +size 5588 diff --git a/data/2025/2504_06xxx/2504.06514/images/796d4f8cbc0422b1cedf1c7ed35fab8608e8e67607cb9103181f709b8ed74a62.jpg b/data/2025/2504_06xxx/2504.06514/images/796d4f8cbc0422b1cedf1c7ed35fab8608e8e67607cb9103181f709b8ed74a62.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30d77ac114a1deb0f9229cb6834deaf9f4196c3d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/796d4f8cbc0422b1cedf1c7ed35fab8608e8e67607cb9103181f709b8ed74a62.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:25cc0a6337105bfb2a27bef4e9d860da9a5145830ed88e1c41a9310fcd007f64 +size 102248 diff --git a/data/2025/2504_06xxx/2504.06514/images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg b/data/2025/2504_06xxx/2504.06514/images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..8c2c27c884023c566a30b14272ce711da4c256a1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a201be46c0be715291eb470ad61388da2575ae71121241cfe1a967dcbe64a6f0 +size 58169 diff --git a/data/2025/2504_06xxx/2504.06514/images/b0eb5bd600feb6c67c46af1fd396f708d068f29cdc078da243ad8fd54b7c58d6.jpg b/data/2025/2504_06xxx/2504.06514/images/b0eb5bd600feb6c67c46af1fd396f708d068f29cdc078da243ad8fd54b7c58d6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..70b90efd8da95f242b9df933b3a36c58d8a8a497 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/b0eb5bd600feb6c67c46af1fd396f708d068f29cdc078da243ad8fd54b7c58d6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e9031164c9a37a60231d85c09eb35f1ba4e168743e291dcfc37f2ab12bc293e +size 28214 diff --git a/data/2025/2504_06xxx/2504.06514/images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg b/data/2025/2504_06xxx/2504.06514/images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2eaae02d73a213c5ddc14325cd24239c5891035a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e354632cf9cc3745b86fb71ca5a1ee183d95b69e6e71a4fc2ce428998c5e4382 +size 4243 diff --git a/data/2025/2504_06xxx/2504.06514/images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg b/data/2025/2504_06xxx/2504.06514/images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44aa57a775a29c3cb93ef54cf580ea2e0f1cdf47 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06514/images/c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:26f849d217ed6cf42c8eea7ceb95dae9118eec7a455db394298cfb9a50485abf +size 13399 diff --git a/data/2025/2504_06xxx/2504.06514/images/ca23867f2a2badba334cfe6d466f5086f7a82b711c906c7a137af0a8fbd5004a.jpg b/data/2025/2504_06xxx/2504.06514/images/ca23867f2a2badba334cfe6d466f5086f7a82b711c906c7a137af0a8fbd5004a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..641a84db8b675cf0f3dace9a9c25254750cb2a42 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/ca23867f2a2badba334cfe6d466f5086f7a82b711c906c7a137af0a8fbd5004a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7574e80f0c4521b316b27e151da7f3c3d75b824b21bcda19a227a9fd6dd88098 +size 119207 diff --git a/data/2025/2504_06xxx/2504.06514/images/e125f721d46e8e08999101331132b083a61090681b41dae80678cdac1c749666.jpg b/data/2025/2504_06xxx/2504.06514/images/e125f721d46e8e08999101331132b083a61090681b41dae80678cdac1c749666.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afcf06f12ef20cb565bf28d2e05fae0ec6afd16e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/e125f721d46e8e08999101331132b083a61090681b41dae80678cdac1c749666.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b825f02315e171df6989a8e1078a6417be055057ea710f0178ad5cd5a8cc75e2 +size 7046 diff --git a/data/2025/2504_06xxx/2504.06514/images/f9c2d6eedd30938033417f18a2a7b5d37772df01e377e0ab20043c31a8b9b5f1.jpg b/data/2025/2504_06xxx/2504.06514/images/f9c2d6eedd30938033417f18a2a7b5d37772df01e377e0ab20043c31a8b9b5f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..44ce72443253ccfd8afb90d754f4d1d2fa4be4e0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/images/f9c2d6eedd30938033417f18a2a7b5d37772df01e377e0ab20043c31a8b9b5f1.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:eae1f19af1c5af7f6456a0f2f83472c1e7c783f143824d8bb0e33e31feebbd3e +size 6569 diff --git a/data/2025/2504_06xxx/2504.06514/layout.json b/data/2025/2504_06xxx/2504.06514/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..6d1cbcc2a72238861bfe89b911d80a45d3e682ad --- /dev/null +++ b/data/2025/2504_06xxx/2504.06514/layout.json @@ -0,0 +1,14044 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 436, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 436, + 111 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 436, + 111 + ], + "type": "text", + "content": "Missing Premise exacerbates Overthinking: Are Reasoning Models losing Critical Thinking Skill?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "spans": [ + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "text", + "content": "Chenrui Fan" + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "text", + "content": ", Ming Li" + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "text", + "content": ", Lichao Sun" + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "text", + "content": ", Tianyi Zhou" + }, + { + "bbox": [ + 110, + 130, + 345, + 143 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 144, + 294, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 144, + 294, + 156 + ], + "spans": [ + { + "bbox": [ + 113, + 144, + 
294, + 156 + ], + "type": "text", + "content": "1University of Maryland; 2Lehigh University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 113, + 156, + 309, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 156, + 309, + 167 + ], + "spans": [ + { + "bbox": [ + 113, + 156, + 309, + 167 + ], + "type": "text", + "content": "{cfan42, minglii, tianyi}@umd.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 112, + 167, + 424, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 167, + 424, + 178 + ], + "spans": [ + { + "bbox": [ + 112, + 167, + 424, + 178 + ], + "type": "text", + "content": "Project: https://github.com/tianyi-lab/MiP-Overthinking" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 213, + 470, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 213, + 470, + 236 + ], + "spans": [ + { + "bbox": [ + 140, + 213, + 470, + 236 + ], + "type": "text", + "content": "\"The Answer to the Great Question... Of Life, the Universe and Everything... is... Forty-two,\" said Deep Thought, with infinite majesty and calm." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 301, + 247, + 466, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 247, + 466, + 258 + ], + "spans": [ + { + "bbox": [ + 301, + 247, + 466, + 258 + ], + "type": "text", + "content": "The Hitchhiker's Guide to the Galaxy" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 281, + 268, + 329, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 268, + 329, + 281 + ], + "spans": [ + { + "bbox": [ + 281, + 268, + 329, + 281 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 294, + 471, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 294, + 471, + 483 + ], + "spans": [ + { + "bbox": [ + 140, + 294, + 471, + 483 + ], + "type": "text", + "content": "We find that the response length of reasoning LLMs, whether trained by reinforcement learning or supervised learning, drastically increases for ill-posed questions with missing premises (MiP), ending up with redundant and ineffective thinking. This newly introduced scenario exacerbates the general overthinking issue to a large extent, which we name as the MiP-Overthinking. Such failures are against the \"test-time scaling law\" but have been widely observed on multiple datasets we curated with MiP, indicating the harm of cheap overthinking and a lack of critical thinking. Surprisingly, LLMs not specifically trained for reasoning exhibit much better performance on the MiP scenario, producing much shorter responses that quickly identify ill-posed queries. This implies a critical flaw of the current training recipe for reasoning LLMs, which does not encourage efficient thinking adequately, leading to the abuse of thinking patterns. To further investigate the reasons behind such failures, we conduct fine-grained analyses of the reasoning length, overthinking patterns, and location of critical thinking on different types of LLMs. 
Moreover, our extended ablation study reveals that the overthinking is contagious through the distillation of reasoning models' responses. These results improve the understanding of overthinking and shed novel insights into mitigating the problem." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 504, + 192, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 504, + 192, + 516 + ], + "spans": [ + { + "bbox": [ + 105, + 504, + 192, + 516 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 529, + 506, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 653 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 653 + ], + "type": "text", + "content": "Reasoning abilities in large language models (LLMs) have become a cornerstone of advanced AI applications (Huang & Chang, 2023; Li et al., 2024; Ahn et al., 2024; Wang et al., 2025), powering breakthroughs in mathematical reasoning (Xiong et al., 2025; Xia et al., 2025), code generation (Liu et al., 2024), and commonsense question answering (Wang & Zhao, 2023). These gains often stem from the scaling law of model/dataset sizes (Kaplan et al., 2020) in both pre-training (Shao et al., 2024) and post-training, which unlock emergent capabilities such as step-by-step reasoning and reflection skills witnessed on OpenAI's GPT-o1 (OpenAI, 2024b) and the open-source DeepSeek-R1 (DeepSeek-AI et al., 2025). By leveraging supervised fine-tuning (SFT) on expert responses (Ye et al., 2025; Muennighoff et al., 2025) and/or reinforcement learning (RL) (DeepSeek-AI et al., 2025), these models are tailored to produce detailed multi-step reasoning paths, whose length increase usually associated with improved performance on complex tasks such as math reasoning and programming." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 656, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 506, + 715 + ], + "type": "text", + "content": "Despite the fascinating reasoning capabilities exhibited on recent models, there is growing concern about the efficiency and quality of the long reasoning process (Sui et al., 2025). Chen et al. (2025b) first raises the \"overthinking\" problem in reasoning LLMs, which is reflected by the excessively long reasoning paths generated for extremely simple queries. For example, even for questions like \"What is the answer of 2 plus 3?\", existing reasoning models might generate hundreds of response tokens." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.06514v2 [cs.AI] 11 Apr 2025" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 721, + 198, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 721, + 198, + 732 + ], + "spans": [ + { + "bbox": [ + 121, + 721, + 198, + 732 + ], + "type": "text", + "content": "*Equal Contribution." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 84, + 294, + 252 + ], + "blocks": [ + { + "bbox": [ + 111, + 84, + 294, + 252 + ], + "lines": [ + { + "bbox": [ + 111, + 84, + 294, + 252 + ], + "spans": [ + { + "bbox": [ + 111, + 84, + 294, + 252 + ], + "type": "image", + "image_path": "83a70306aaaf043c4e223f749960c097a7c39528efaef00790aac610d3682fd6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 255, + 504, + 332 + ], + "lines": [ + { + "bbox": [ + 104, + 255, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 504, + 332 + ], + "type": "text", + "content": "Figure 1: Illustration of MiP-Overthinking. When queried by questions with missing premises, the response length of reasoning models increases excessively, and they cannot abstain from answering with MiP identified. The left shows a query with an undefined variable, while the right compares a well-defined GSM8K question with its MiP variant (with a critical numerical condition removed). Reasoning models' responses to MiP questions are much longer than those for well-defined questions and those generated by non-reasoning models. The left corner of each response report the response length and thinking time by DeepSeek-R1." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 296, + 84, + 500, + 253 + ], + "blocks": [ + { + "bbox": [ + 296, + 84, + 500, + 253 + ], + "lines": [ + { + "bbox": [ + 296, + 84, + 500, + 253 + ], + "spans": [ + { + "bbox": [ + 296, + 84, + 500, + 253 + ], + "type": "image", + "image_path": "4d91fe8d6260c6f2dc566126bd09d5e23bc9f5cf868cf3a964b1ee7fdc9ff4b3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 338, + 506, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 506, + 482 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 506, + 482 + ], + "type": "text", + "content": "In particular, the ill-posed queries are unsolvable due to the lack of a necessary premise or condition. We call the reasoning failure for the ill-posed queries Overthinking under Missing Premise (MiP-Overthinking). For example, the simplest MiP question is What is the value of " + }, + { + "bbox": [ + 104, + 338, + 506, + 482 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 338, + 506, + 482 + ], + "type": "text", + "content": "?1, as shown on the left part of Figure 1. Without providing any other information regarding " + }, + { + "bbox": [ + 104, + 338, + 506, + 482 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 338, + 506, + 482 + ], + "type": "text", + "content": ", it is evidently unsolvable. However, DeepSeek-R1 generates thousands of tokens and spends several minutes thinking about this question before outputting the final meaningless answer. In this paper, we find that a trivial type of ill-posed queries will significantly exacerbate the overthinking of reasoning models, resulting in excessively redundant and meaningless thinking. 
In contrast, humans and even non-reasoning models are often immune to such scenarios and quickly end up by questioning the validity of the given query, indicating the critical thinking capability. This exposes a risk of the abuse of thinking patterns and a lack of critical thinking on the models trained for deep thinking. Ideally, a model with critical thinking skills is expected to identify the missing premise and quickly respond by either requesting clarification or gracefully indicating that it cannot proceed (Cole et al., 2023; Amayuelas et al., 2024)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 487, + 504, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 487, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 104, + 487, + 504, + 630 + ], + "type": "text", + "content": "MiP-Overthinking differs from the widely discussed overthinking issue (Cuadron et al., 2025), in which the query is usually well-defined, but a model applies much more reasoning than necessary for little benefit. MiP-Overthinking, by contrast, happens when the question itself is ill-posed and lacks sufficient information to be solved. For example, the right of Figure 1 presents a well-defined question from GSM8K and a MiP variant, where the latter triggers a drastic increase of the generated tokens on recent reasoning models compared with the general overthinking. Overthinking can be presented by the length difference between models addressing the same well-defined questions, while MiP-Overthinking can be presented by the additional tokens generated due to MiP. MiP-Overthinking further reveals the lack of critical thinking that questions the validity of ill-posed questions and quickly identifies MiP, thus abstaining from answering the questions. Moreover, we observe that reasoning models' ineffective and redundant thinking often cannot stop even after successful notice of MiP, violating the expectation of test-time scaling law. 
Hence, MiP-Overthinking indicates potential drawbacks of current training recipes of reasoning models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 635, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 691 + ], + "type": "text", + "content": "To systematically investigate this issue, we construct a suite of MiP questions designed to trigger the overthinking failures in a controlled way. These include synthetic questions generated by Rule-based Formula (queries where a formula reference is empty or nonsensical) and careful modifications of established datasets across diverse levels of difficulties, including SVAMP, GSM8K, and MATH500. On the modified datasets of MiP questions, we empirically evaluate a wide range of state-of-the-art" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "type": "text", + "content": "1In The Hitchhiker's Guide to the Galaxy, the supercomputer Deep Thought spends hundreds of years to answer the the Ultimate Question of Life, the Universe, and Everything as 42, and we observe that DeepSeek-R1 spends thousands of tokens to answer What is the value of " + }, + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "type": "inline_equation", + "content": "a" + }, + { + "bbox": [ + 104, + 700, + 504, + 732 + ], + "type": "text", + "content": " as 2, which we find them interestingly alike." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 127 + ], + "type": "text", + "content": "LLMs, from reasoning models to non-reasoning models and from open-sourced models to proprietary models, to ensure the generalizability of our findings. Our analysis is mainly based on three evaluation metrics, the length of generated responses, the accuracy on well-defined questions, and the abstain rate on ill-posed questions with MiP." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 232 + ], + "type": "text", + "content": "Main Contributions: We present the first in-depth study of Overthinking under Missing Premise (MiP-Overthinking), which reveals a critical shortcoming in existing reasoning models: Although they appear to follow coherent reasoning patterns, they lack genuine critical thinking capabilities. To systematically analyze this issue, we curate four MiP datasets covering various difficulty levels and three ill-posed question generation strategies, i.e., Rule-Based Generation, Body-Question Swapping, and Essential-Premise Removal. We then evaluate a wide range of large language models including reasoning-based and non-reasoning ones. Our empirical results illuminate the differences in how models handle well-defined vs. 
MiP questions, ultimately offering insights into the limitations of existing reasoning models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 236, + 183, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 236, + 183, + 248 + ], + "spans": [ + { + "bbox": [ + 105, + 236, + 183, + 248 + ], + "type": "text", + "content": "Our key findings:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 258, + 506, + 402 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": "1. Missing premise in questions induces reasoning models to generate significantly longer (" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "inline_equation", + "content": "2 \\times" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "inline_equation", + "content": "4 \\times" + }, + { + "bbox": [ + 104, + 258, + 506, + 304 + ], + "type": "text", + "content": " more tokens) responses than general overthinking on well-defined questions. The increased tokens fail to help identify MiP in the ill-posed questions, surprisingly contradicting the widely-discussed test-time scaling law." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 310, + 506, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 310, + 506, + 334 + ], + "spans": [ + { + "bbox": [ + 104, + 310, + 506, + 334 + ], + "type": "text", + "content": "2. In contrast, given MiP questions, non-reasoning models generate consistently shorter responses and quickly identify MiP, demonstrating greater robustness to the absence of critical information." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 338, + 505, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 505, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 505, + 373 + ], + "type": "text", + "content": "3. Reasoning models respond differently to well-defined vs. MiP questions: they mostly follow stable chain-of-thoughts for the former, but are often trapped in a self-doubt loop, repeatedly revisiting the question, and guessing the user intentions under MiP, resulting in an explosion of tokens." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 378, + 504, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 504, + 402 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 504, + 402 + ], + "type": "text", + "content": "4. Reasoning models often can notice the existence of MiP or identify it at an early stage, but they hesitate to commit to this judgment and keep outputting ineffective thinking." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 420, + 356, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 420, + 356, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 420, + 356, + 434 + ], + "type": "text", + "content": "2 Missing Premise Definition and Construction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 447, + 257, + 459 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 447, + 257, + 459 + ], + "spans": [ + { + "bbox": [ + 105, + 447, + 257, + 459 + ], + "type": "text", + "content": "2.1 Definition of Missing Premise" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 504, + 504 + ], + "type": "text", + "content": "Prior to introducing the construction our dataset and analyzing the behavior of reasoning models on problems with missing premises, we formally define the Missing Premise (MiP) problem to establish a rigorous foundation for our subsequent analysis." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 120, + 520, + 490, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 520, + 490, + 554 + ], + "spans": [ + { + "bbox": [ + 120, + 520, + 490, + 554 + ], + "type": "text", + "content": "Definition 1 (Missing Premise Problem). Let " + }, + { + "bbox": [ + 120, + 520, + 490, + 554 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 120, + 520, + 490, + 554 + ], + "type": "text", + "content": " be a question, and let " + }, + { + "bbox": [ + 120, + 520, + 490, + 554 + ], + "type": "inline_equation", + "content": "P = \\{P_{1},\\dots ,P_{n}\\}" + }, + { + "bbox": [ + 120, + 520, + 490, + 554 + ], + "type": "text", + "content": " be a set of premises. 
Define the function mapping premises and a question to the set of logically valid answers as:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 193, + 560, + 489, + 574 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 193, + 560, + 489, + 574 + ], + "spans": [ + { + "bbox": [ + 193, + 560, + 489, + 574 + ], + "type": "interline_equation", + "content": "\\mathcal {F} (P, \\mathcal {Q}) = \\{A \\mid P \\vdash A, A \\text {i s a n a n s w e r r e s o l v i n g} \\mathcal {Q} \\} \\tag {1}", + "image_path": "f9c2d6eedd30938033417f18a2a7b5d37772df01e377e0ab20043c31a8b9b5f1.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "spans": [ + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "inline_equation", + "content": "\\vdash" + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "text", + "content": " denotes logical entailment. Consider a proper subset " + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "inline_equation", + "content": "P' = P \\setminus \\{P_i\\}" + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "text", + "content": " for some " + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "inline_equation", + "content": "P_i \\in P" + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "text", + "content": ". 
The tuple " + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "inline_equation", + "content": "(P', \\mathcal{Q})" + }, + { + "bbox": [ + 120, + 578, + 489, + 602 + ], + "type": "text", + "content": " forms a missing premise problem if and only if:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 223, + 607, + 489, + 622 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 223, + 607, + 489, + 622 + ], + "spans": [ + { + "bbox": [ + 223, + 607, + 489, + 622 + ], + "type": "interline_equation", + "content": "\\left| \\mathcal {F} (P, \\mathcal {Q}) \\right| = 1 \\quad a n d \\quad \\left| \\mathcal {F} \\left(P ^ {\\prime}, \\mathcal {Q}\\right) \\right| \\neq 1 \\tag {2}", + "image_path": "51c9a424f50294ee98114b60767685513a66cab43692e9df1b0ee1cb5f9e1cfa.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 119, + 626, + 489, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 626, + 489, + 650 + ], + "spans": [ + { + "bbox": [ + 119, + 626, + 489, + 650 + ], + "type": "text", + "content": "This indicates that the removed premise " + }, + { + "bbox": [ + 119, + 626, + 489, + 650 + ], + "type": "inline_equation", + "content": "P_{i}" + }, + { + "bbox": [ + 119, + 626, + 489, + 650 + ], + "type": "text", + "content": " is essential for uniquely determining the logically valid answer to the question " + }, + { + "bbox": [ + 119, + 626, + 489, + 650 + ], + "type": "inline_equation", + "content": "\\mathcal{Q}" + }, + { + "bbox": [ + 119, + 626, + 489, + 650 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "According to Definition 1, an ideal reasoning system should efficiently identify the absence of a critical premise and terminate its inference process upon recognizing that the available information is insufficient to derive a unique solution to the given problem. However, our empirical analysis in Section 3.2 demonstrates that state-of-the-art reasoning models consistently fail to exhibit this capability. Instead, these models engage in extensive, redundant reasoning chains that consume significant computational resources without ultimately identifying the missing premise." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 79, + 504, + 260 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 260 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 260 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 260 + ], + "type": "table", + "html": "
DatasetExampleDiffCountPairMethod
MiP-FormulaWhat is the value of ln(a+b)?50×Rule-Based Generation
MiP-SVAMPPaco had 26 salty cookies and 17 sweet cookies. He ate 14 sweet cookies and 9 salty cookies. How many salty cookies did Paeo have left? How many pencils does she have?300×Body-Question Swapping
MiP-GSM8KJames decides to run 3 sprints 3 times a week. He runs 60-meters each sprint. How many total meters does he run a week?★★582Essential-Premise Removal
MiP-MATHThere are 360 people in my school. 15 take calculus, physics, and chemistry, and 15 don’t take any of them. 180 take calculus. Twice as many students take chemistry as take physics. 75 take both calculus and chemistry, and 75 take both physics and chemistry. Only 30 take both physics and calculus. How many students take physics?★★★58Essential-Premise Removal
", + "image_path": "796d4f8cbc0422b1cedf1c7ed35fab8608e8e67607cb9103181f709b8ed74a62.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 263, + 506, + 319 + ], + "lines": [ + { + "bbox": [ + 104, + 263, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 506, + 319 + ], + "type": "text", + "content": "Table 1: Statistics and examples of our curated MiP datasets. For GSM8K and MATH, a premise is removed from the original questions (crossed out) to create MiP questions. Diff represents the (estimated) difficulty for models to identify MiP. Count denotes the number of questions in the subset. Pair indicates whether each MiP question is associated with a well-defined original question. Method indicates the method used to generate the MiP question." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 335, + 264, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 264, + 346 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 264, + 346 + ], + "type": "text", + "content": "2.2 Overview of Data Construction" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 356, + 506, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 506, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 506, + 411 + ], + "type": "text", + "content": "To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different difficulties. In addition, we also construct a synthetic dataset consisting of formulas with unassigned variables. 
Our ill-posed question generation employs three distinct methods covering three difficulty levels and three strategies to create MiP questions:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 116, + 420, + 506, + 518 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 116, + 420, + 504, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 420, + 504, + 444 + ], + "spans": [ + { + "bbox": [ + 116, + 420, + 504, + 444 + ], + "type": "text", + "content": "- **Rule-Based Generation:** This approach generates MiP questions through a principled formula construction process, where unassigned variables serve as the missing premises." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 447, + 504, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 447, + 504, + 480 + ], + "spans": [ + { + "bbox": [ + 116, + 447, + 504, + 480 + ], + "type": "text", + "content": "- Body-Question Swapping: We introduce logical inconsistencies by deliberately mismatching problem bodies with their corresponding questions from the original dataset. This creates scenarios where the premises and queries are fundamentally incompatible." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 116, + 483, + 506, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 483, + 506, + 518 + ], + "spans": [ + { + "bbox": [ + 116, + 483, + 506, + 518 + ], + "type": "text", + "content": "- Essential-Premise Removal: Through careful analysis of existing well-formed questions, we identify and remove critical premises that are necessary for logical resolution. This transformation preserves the question's structure while rendering it unsolvable." 
+ } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 527, + 506, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 527, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 527, + 506, + 561 + ], + "type": "text", + "content": "The following sections provide a detailed overview of our data construction process for each dataset category. For comprehensive implementation details and additional methodological considerations, we refer readers to Appendix B." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 566, + 506, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 566, + 506, + 611 + ], + "spans": [ + { + "bbox": [ + 104, + 566, + 506, + 611 + ], + "type": "text", + "content": "MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through combinations of variables and operators, with a maximum recursion depth of three. While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 616, + 506, + 672 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 506, + 672 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 506, + 672 + ], + "type": "text", + "content": "MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset with elementary-school-level math problems, where each instance consists of a problem body and an associated question. We generate MiP question by randomly permuting the problem bodies and associated questions and then manually inspect them to avoid inadvertent cases. The resulting problems contain clear logical inconsistencies between their body and question components, which is easy for a human to identify." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 733 + ], + "type": "text", + "content": "MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a more complex mathematics dataset than SVAMP. The questions in GSM8K typically contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question before conducting human verification to filter out those questions that are still solvable in some way." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 82, + 504, + 258 + ], + "blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 258 + ], + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 258 + ], + "type": "image", + "image_path": "2285dec271aa03969c7f63403f79b4c9a48611c18f2121e5f9fd4647d1b52651.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 262, + 506, + 351 + ], + "lines": [ + { + "bbox": [ + 104, + 262, + 506, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 506, + 351 + ], + "type": "text", + "content": "Figure 2: Response lengths, accuracy on well-defined questions, and abstain rate of reasoning/non-reasoning models on MiP questions from our MiP-GSM8K dataset. (1) Existing reasoning models generate significantly longer responses for MiP questions than well-defined questions, while non-reasoning models generate responses of similar lengths for both types of questions, indicating MiP-Overthinking for reasoning models. (2) For both questions, reasoning models generate longer responses than non-reasoning models, indicating General Overthinking. (3) Although the longer responses by reasoning models slightly improve the accuracy for well-defined questions, it does not enhance the abstain rate for MiP questions, indicating a contradiction on the test-time scaling law." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 358, + 504, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 358, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 358, + 504, + 380 + ], + "type": "text", + "content": "Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 386, + 506, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 386, + 506, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 386, + 506, + 453 + ], + "type": "text", + "content": "MiP-MATH. For MATH 500 dataset (Hendrycks et al., 2021), which contains challenging mathematical questions at the competition level, it is difficult to build a rule-based filtering mechanism. Thus, we manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 468, + 316, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 316, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 316, + 482 + ], + "type": "text", + "content": "3 Overthinking under Missing Premise" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 493, + 212, + 503 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 212, + 503 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 212, + 503 + ], + "type": "text", + "content": "3.1 Evaluation Metrics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 514, + 506, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 514, + 506, + 546 + ], + "spans": [ + { + "bbox": [ + 104, + 514, + 506, + 546 + ], + "type": "text", + "content": "To systematically evaluate model responses under MiP, we conduct experiments with a diverse set of reasoning and non-reasoning models. For each model, we analyze calculate the following metrics for the responses across different datasets:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 547, + 504, + 630 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 107, + 547, + 504, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 547, + 504, + 569 + ], + "spans": [ + { + "bbox": [ + 107, + 547, + 504, + 569 + ], + "type": "text", + "content": "- Response Length: The average number of tokens in the response, incorporating both reasoning steps and final answer components." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 572, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 572, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 107, + 572, + 504, + 606 + ], + "type": "text", + "content": "- Abstain Rate for MiP Question: The proportion of answers where the model explicitly identifies the missing premise and either declines to provide an answer or requests additional information necessary for solving the problem." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 609, + 504, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 609, + 504, + 630 + ], + "spans": [ + { + "bbox": [ + 107, + 609, + 504, + 630 + ], + "type": "text", + "content": "- Accuracy for Well-defined Question: The proportion of answers where the model produces a definitive response that aligns with the reference answer." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 632, + 504, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 504, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 504, + 665 + ], + "type": "text", + "content": "For datasets without reference answers (MiP-Formula and MiP-SVAMP), we only calculate the abstain rate for the questions. Response evaluation is performed using GPT-4o as an automated evaluator. Detailed experimental procedures and evaluation protocols are provided in Appendix A." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 677, + 187, + 689 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 187, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 187, + 689 + ], + "type": "text", + "content": "3.2 Main Results" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 699, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 699, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 699, + 504, + 731 + ], + "type": "text", + "content": "Figure 2 compares average response length, accuracy on well-defined questions, and the abstain rate on MiP questions across a range of state-of-the-art LLMs, revealing several significant patterns in model behavior." + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 79, + 504, + 315 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 315 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 315 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 315 + ], + "type": "table", + "html": "
ModelTypeMiP-FormulaMiP-SWAMPTypeMiP-GSM8KMiP-MATH
Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑Length↓Abstain↑
Non-Reasoning Models
Qwen2.5-32B-InstructMiP28544.012898.3MiP21944.052515.4
Well-defined2460.511141.9
GPT-4oMiP33870.012296.3MiP20246.948715.4
Well-defined2120.54721.9
Gemini 1.5MiP45320.05299.0MiP10354.55685.8
Well-defined1560.55020.0
Gemma-2-27B-ITMiP20485.78992.0MiP13153.833838.5
Well-defined1480.330511.5
Phi-3-medium-128kMiP146548.012598.7MiP21047.842723.1
Well-defined2161.015493.8
Reasoning Models
GPT-o1MiP112378.058199.0MiP83855.7418930.8
Well-defined3480.325020.0
GPT-o1miniMiP95866.063996.7MiP76240.0219325.0
Well-defined4491.219130.0
GPT-o3miniMiP102576.0129993.0MiP151623.7377211.5
Well-defined3841.415530.0
DS Distill Qwen2.5-32BMiP1291142.092188.3MiP230224.698765.8
Well-defined5190.232460.0
DeepSeek R1MiP47576.0199684.3MiP324616.572683.8
Well-defined12260.232001.9
S1.1-32BMiP528418.0335857.0MiP502416.0932215.4
Well-defined18960.250370.0
QwQ-32BMiP79370.0348756.3MiP478010.1102421.9
Well-defined18960.250370.0
", + "image_path": "4c8dc6f4d9833d3fce23421c24b1b26bab79aa993e32376b4415fd95a18165cd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 323, + 504, + 379 + ], + "lines": [ + { + "bbox": [ + 104, + 323, + 504, + 379 + ], + "spans": [ + { + "bbox": [ + 104, + 323, + 504, + 379 + ], + "type": "text", + "content": "Table 2: Comparing response length and abstain rate across different MiP datasets. Shorter lengths and higher abstain rates are preferred. For each column, the top-3 preferred values are colored in green, otherwise red. MiP-Overthinking, reflected by longer response with low abstain rate, is commonly observed on most existing reasoning models across all datasets, indicating a critical drawback of existing reasoning models." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 407, + 504, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 504, + 506 + ], + "type": "text", + "content": "Firstly, existing reasoning models (left side of the figure) display an explosive increase in response length when facing the MiP questions, often producing " + }, + { + "bbox": [ + 104, + 407, + 504, + 506 + ], + "type": "inline_equation", + "content": "2 - 4 \\times" + }, + { + "bbox": [ + 104, + 407, + 504, + 506 + ], + "type": "text", + "content": " more tokens than general overthinking on well-defined questions. For example, QwQ-32B (Team, 2025) and DeepSeek-R1 (DeepSeek-AI et al., 2025) exhibit a substantial increase from already long reasoning paths on well-defined questions (approximately 1,000 tokens for simple GSM8K questions) to highly lengthy outputs (more than 3,000 tokens) under missing premise conditions. 
On the contrary, no similar issues exist for non-reasoning models (right side of the figure), which generate similar token counts for both types of well-defined and MiP questions. This phenomenon directly illustrates the MiP-Overthinking phenomenon as introduced in the paper." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 511, + 506, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 506, + 600 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 506, + 600 + ], + "type": "text", + "content": "Secondly, comparing the token lengths on well-defined questions between the reasoning and non-reasoning models, reasoning models tend to produce longer responses, even for simple questions, than non-reasoning models, underscoring the inefficient and verbose responses of existing reasoning models. For example, for the non-reasoning models, it only takes approximately 200 tokens for them to generate the responses for well-defined questions, while taking 1,000 tokens for DeepSeek-R1 and 1,800 tokens for QWQ-32B to answer the exactly same questions. However, the explosive increase in extra tokens does not lead to corresponding large accuracy improvements, shown in the green line, highlighting the issue of the General Overthinking." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 605, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 506, + 704 + ], + "type": "text", + "content": "Finally, the abstain rates (red line) on MiP questions reveal that although some reasoning models (e.g., GPT-o1) have promising capabilities in abstaining from the MiP questions, most of the other reasoning models are not able to abstain from the given MiP questions correctly despite the dramatically long reasoning paths. 
This phenomenon indicates that although most existing reasoning models have thinking and reasoning capabilities to some extent, they lack the critical thinking capabilities to \"reject\" ill-posed questions. By contrast, non-reasoning models, though they are not explicitly trained for reasoning, tend to strike a better balance, generating shorter answers that are more likely to acknowledge MiP when the question is ill-posed. This phenomenon reveals a surprising contradiction on test-time scaling law." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "Moreover, Table 2 further presents the comparisons on length and abstain rate on other MiP datasets we curated. The preferred results are colored green (shorter responses and higher abstain rate for" + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 80, + 502, + 296 + ], + "blocks": [ + { + "bbox": [ + 106, + 80, + 502, + 296 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 502, + 296 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 502, + 296 + ], + "type": "table", + "html": "
ModelsTypeAlternativelyWaitCheckButHypothesisStep
Cnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.ΔCnt.Δ
Non-Reasoning Models
Qwen2.5-32BMiP0.00.00.00.00.00.00.30.20.00.04.3-1.3
Well-defined0.00.00.00.10.05.6
GPT-4oMiP0.00.00.00.00.00.00.30.20.00.04.7-1.5
Well-defined0.00.00.00.10.06.2
Gemini 1.5MiP0.00.00.00.00.00.00.10.10.00.01.6-2.2
Well-defined0.00.00.00.00.03.8
Gemma-2-27BMiP0.00.00.00.00.00.00.10.10.00.05.2-0.5
Well-defined0.00.00.00.00.05.7
Reasoning Models
DS-Distill QwenMiP11.511.419.719.31.00.840.139.338.438.054.942.2
Well-defined0.10.40.20.80.412.7
DeepSeek R1MiP16.915.214.410.93.81.349.442.144.740.454.233.0
Well-defined1.73.52.57.34.321.2
S1.1MiP42.038.021.915.95.52.587.274.184.877.079.950.9
Well-defined4.06.03.013.17.829.0
QwQMiP47.040.319.413.05.01.666.154.294.181.797.958.7
Well-defined6.76.43.411.912.439.2
", + "image_path": "ca23867f2a2badba334cfe6d466f5086f7a82b711c906c7a137af0a8fbd5004a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 304, + 504, + 381 + ], + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 381 + ], + "type": "text", + "content": "Table 3: Comparisons of reasoning-related token counts on MiP-GSM8K dataset. Hypothesis category includes several key words, including perhaps, maybe, and might. Step represents the step counts, split by " + }, + { + "bbox": [ + 104, + 304, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\backslash n\\backslash n" + }, + { + "bbox": [ + 104, + 304, + 504, + 381 + ], + "type": "text", + "content": ", where negative values are colored in green and positive in red. " + }, + { + "bbox": [ + 104, + 304, + 504, + 381 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 104, + 304, + 504, + 381 + ], + "type": "text", + "content": " denotes the difference between MiP and well-defined questions. When facing MiP questions, reasoning models encounter explosive growths on reasoning-related tokens and steps, indicating a severe abuse of thinking patterns, while non-reasoning models use fewer steps for MiP questions than well-defined ones." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 403, + 504, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 403, + 504, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 403, + 504, + 469 + ], + "type": "text", + "content": "MiP questions), and the worse results are colored red, from which we can easily discover that reasoning models are prone to generate long responses while having low abstain rates across all datasets, indicating the consistent MiP Overthinking issue of existing reasoning models. 
In addition, by comparing the behaviors of models on different datasets, we can observe that for the relatively harder dataset (MiP-MATH), all models generate relatively longer responses and obtain lower abstain rates, indicating that harder MiP questions require reasoning capabilities." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 483, + 277, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 277, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 277, + 495 + ], + "type": "text", + "content": "3.3 Thinking Patterns through Tokens" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 503, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 503, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 503, + 504, + 581 + ], + "type": "text", + "content": "To gain deeper insight into the MiP-Overthinking issue, we compare the reasoning-related token distribution on the MiP-GSM8K dataset. As shown in Table 3, we break down the average usages of several token patterns related to the thinking process, as well as the number of steps for each model to solve the given questions. Specifically, values of alternatively, wait, check, and but can be directly counted from the model responses, including the thinking paths of reasoning models. Hypothesis category includes several key words, including perhaps, maybe, and might. 
Step represents the step counts, split by " + }, + { + "bbox": [ + 104, + 503, + 504, + 581 + ], + "type": "inline_equation", + "content": "\\backslash n\\backslash n" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 586, + 506, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 686 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 686 + ], + "type": "text", + "content": "Reasoning models exhibit much higher occurrence of tokens such as alternatively, wait, and check, compared with non-reasoning models, whose frequencies remain close to zero, indicating their advanced thinking capabilities. However, when moving from well-defined to MiP questions, reasoning models encounter explosive growths on reasoning-related tokens, indicating a large redundancy in thinking patterns. Moreover, when comparing the changes of steps, reasoning models exhibit a large increase in step count for MiP questions, while non-reasoning models typically show fewer steps, suggesting they quickly conclude the question is unanswerable. With this gap, together with the consistently better abstain rates of the non-reasoning models, we conclude that the lengthy reasoning steps are mostly redundant and indicate self-doubt thinking patterns for reasoning models." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 224, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 224, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 224, + 94 + ], + "type": "text", + "content": "3.4 Step-level Similarities" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 104, + 298, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 104, + 298, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 104, + 298, + 270 + ], + "type": "text", + "content": "To further assess how redundant the generated content becomes under MiP conditions, we examine the step-level similarity within the model's responses on our MiP-GSM8K dataset. Specifically, we divide each response into discrete steps, split by " + }, + { + "bbox": [ + 104, + 104, + 298, + 270 + ], + "type": "inline_equation", + "content": "\\backslash n\\backslash n" + }, + { + "bbox": [ + 104, + 104, + 298, + 270 + ], + "type": "text", + "content": " and compute pairwise cosine similarity scores with embeddings generated by \"all-MiniLM-L6-v2\" (Reimers & Gurevych, 2019). The visualization is shown in Figure 3, where each value in the heatmap metric represents the averaged cosine similarities between the corresponding step index. The average similarity score for well-defined question is 0.45 and 0.50 for MiP response. The variance is 7.9e-3 and 8.2e-4 respectively." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 275, + 298, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 275, + 298, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 298, + 352 + ], + "type": "text", + "content": "As shown in the figure, responses to MiP questions have greater overall similarity across steps and lower standard variance, indicating the considerable redundancy in the content. This means, in many instances, the model revisits similar partial reasoning or repeats previous sentences with only minor changes, showing a" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 352, + 506, + 397 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 352, + 506, + 397 + ], + "spans": [ + { + "bbox": [ + 104, + 352, + 506, + 397 + ], + "type": "text", + "content": "potential self-trapping issue. Together, these patterns confirm that MiP questions induce a high degree of repetitive content in reasoning models. Rather than terminating early to conclude for insufficient premise, the models fill their reasoning paths with repetitive re-checks and reiterations, significantly inflating token usage without improving real abstain rates." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 306, + 107, + 503, + 223 + ], + "blocks": [ + { + "bbox": [ + 306, + 107, + 503, + 223 + ], + "lines": [ + { + "bbox": [ + 306, + 107, + 503, + 223 + ], + "spans": [ + { + "bbox": [ + 306, + 107, + 503, + 223 + ], + "type": "image", + "image_path": "21997b367c998ae7e0b280ba28992ae37ae9bb8c5688702c3100fbcdbed8d9c7.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 304, + 225, + 504, + 346 + ], + "lines": [ + { + "bbox": [ + 304, + 225, + 504, + 346 + ], + "spans": [ + { + "bbox": [ + 304, + 225, + 504, + 346 + ], + "type": "text", + "content": "Figure 3: The step-level similarity heatmaps for s1.1 responses towards well-defined (left) and MiP (right) questions in MiP-GSM8K dataset. To avoid differences in matrix size, we only consider responses with more than 50 steps and visualize the average similarity matrix across the first 50 steps. The heatmap for MiP questions has a higher averaged similarity and lower standard variance, also shown in the heatmap, which indicates the considerable redundancy in its content when responding to MiP questions." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 413, + 285, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 413, + 285, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 413, + 285, + 426 + ], + "type": "text", + "content": "3.5 Thinking Patterns through Example" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 435, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 435, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 435, + 504, + 502 + ], + "type": "text", + "content": "To further understand what happens in the reasoning chain of reasoning models when facing an ill-posed input, we present an example of a reasoning model's response to a MiP question in Figure 4. We summarize five major thinking patterns we found in the example and highlight them with different colors. We can observe from the example that the model abuses these patterns to generate long responses, while the responses are not only redundant but also not helpful for the model to abstain from the given MiP question. More examples can be found in Appendix D." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 511, + 224, + 523 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 511, + 224, + 523 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 224, + 523 + ], + "type": "text", + "content": "4 Further Discussion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 538, + 298, + 551 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 538, + 298, + 551 + ], + "spans": [ + { + "bbox": [ + 105, + 538, + 298, + 551 + ], + "type": "text", + "content": "4.1 Do Models know premises are missing?" 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 561, + 504, + 661 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 561, + 504, + 661 + ], + "spans": [ + { + "bbox": [ + 104, + 561, + 504, + 661 + ], + "type": "text", + "content": "To investigate whether reasoning models recognize the potential unsolvability of questions during their reasoning process, we conducted a detailed analysis of their reasoning chains. We segmented each reasoning chain into discrete steps using " + }, + { + "bbox": [ + 104, + 561, + 504, + 661 + ], + "type": "inline_equation", + "content": "\\backslash n \\backslash n" + }, + { + "bbox": [ + 104, + 561, + 504, + 661 + ], + "type": "text", + "content": " as delimiters and performed step-wise verification to detect whether models express doubt on the question solvability. We introduce two key metrics for this analysis: In-Process Suspicion Rate, which measures the percentage of responses where the model expresses doubt about solvability during reasoning, and First Suspicion Index, which captures the average step number at which the model first suspects the missing premise. To ensure robust evaluation, we employed GPT-4o to assess each step three times, using majority voting for our final step-level result. The quantitative results of this analysis are presented in Table 4." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 505, + 733 + ], + "type": "text", + "content": "As we can see from the table, most of the existing reasoning models have suspected that the given question might be unsolvable at the very early stage of their reasoning process, demonstrating the ability of reasoning models to recognize the potential MiP. 
However, these reasoning models lack critical thinking capabilities: they are prone to keep digging the given unsolvable question by re-visiting the question and related definitions again and again and again, rather than question the solvability of the given question. Thus, as visualized in Figure 5, despite existing reasoning" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 140, + 83, + 456, + 94 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 83, + 456, + 94 + ], + "spans": [ + { + "bbox": [ + 140, + 83, + 456, + 94 + ], + "type": "text", + "content": "1 Revisit Question 2 Visit Knowledge 3 Propose Assumption 4 Self Doubt 5 Pause/Check" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 194, + 97, + 410, + 107 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 194, + 97, + 410, + 107 + ], + "spans": [ + { + "bbox": [ + 194, + 97, + 410, + 107 + ], + "type": "text", + "content": "Model Response for a MiP-Formula Question from s1.1-32B" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 147, + 114, + 280, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 114, + 280, + 124 + ], + "spans": [ + { + "bbox": [ + 147, + 114, + 280, + 124 + ], + "type": "text", + "content": "Question: What is the value of " + }, + { + 
"bbox": [ + 147, + 114, + 280, + 124 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 147, + 124, + 204, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 124, + 204, + 131 + ], + "spans": [ + { + "bbox": [ + 147, + 124, + 204, + 131 + ], + "type": "text", + "content": "Model Response:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "spans": [ + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "text", + "content": "Okay, so I need to figure out the value of " + }, + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi_{r}" + }, + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "text", + "content": " Hmm, first off, I should probably clarify what these symbols represent. The problem uses double parentheses around gamma, so " + }, + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "inline_equation", + "content": "(\\langle \\gamma \\rangle)" + }, + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "text", + "content": ", and then the Greek letter Xi (" + }, + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "inline_equation", + "content": "\\Xi_{r}" + }, + { + "bbox": [ + 147, + 131, + 457, + 165 + ], + "type": "text", + "content": "). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "spans": [ + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "text", + "content": "Let me start by recalling what I know about gamma. 
In mathematics, " + }, + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "text", + "content": " often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is " + }, + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "text", + "content": ". The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook. For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like " + }, + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 165, + 457, + 222 + ], + "type": "text", + "content": ". Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 147, + 222, + 457, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 222, + 457, + 273 + ], + "spans": [ + { + "bbox": [ + 147, + 222, + 457, + 273 + ], + "type": "text", + "content": "Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context. 
Wait, the question is just asking for the value of " + }, + { + "bbox": [ + 147, + 222, + 457, + 273 + ], + "type": "inline_equation", + "content": "(\\gamma)) + \\Xi" + }, + { + "bbox": [ + 147, + 222, + 457, + 273 + ], + "type": "text", + "content": " without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "spans": [ + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "text", + "content": "Let me consider if this could be a trick question. The double parentheses around gamma " + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "text", + "content": " maybe that's referring to the number of letters in the word \"gamma\" or something like that. Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So " + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "text", + "content": " could be 5. Then " + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "text", + "content": " is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe " + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "text", + "content": " represents 14. 
Then " + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "inline_equation", + "content": "5 + 14" + }, + { + "bbox": [ + 147, + 273, + 457, + 331 + ], + "type": "text", + "content": " would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that In the Greek numeral system, letters are assigned values as follows." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 262, + 331, + 344, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 262, + 331, + 344, + 338 + ], + "spans": [ + { + "bbox": [ + 262, + 331, + 344, + 338 + ], + "type": "text", + "content": ".67 more paragraphs here.." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 147, + 338, + 457, + 355 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 338, + 457, + 355 + ], + "spans": [ + { + "bbox": [ + 147, + 338, + 457, + 355 + ], + "type": "text", + "content": "But just to check another angle. In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "spans": [ + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "text", + "content": "Alternatively, in chemistry, " + }, + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "text", + "content": " is used to denote surface tension, and " + }, + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "text", + "content": " is not a standard symbol. 
In engineering, " + }, + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 147, + 355, + 457, + 372 + ], + "type": "text", + "content": " is sometimes the shear strain. But again, not relevant." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 147, + 371, + 457, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 371, + 457, + 388 + ], + "spans": [ + { + "bbox": [ + 147, + 371, + 457, + 388 + ], + "type": "text", + "content": "Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 147, + 388, + 457, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 388, + 457, + 404 + ], + "spans": [ + { + "bbox": [ + 147, + 388, + 457, + 404 + ], + "type": "text", + "content": "Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "spans": [ + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "text", + "content": "Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. 
For example, in the Greek numeral system, " + }, + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "text", + "content": " is 3 and " + }, + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "text", + "content": " is 60, so " + }, + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "inline_equation", + "content": "3 + 60 = 63" + }, + { + "bbox": [ + 147, + 404, + 457, + 437 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 147, + 437, + 365, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 437, + 365, + 447 + ], + "spans": [ + { + "bbox": [ + 147, + 437, + 365, + 447 + ], + "type": "text", + "content": "Alternatively, if it's based on their positions in the alphabet, " + }, + { + "bbox": [ + 147, + 437, + 365, + 447 + ], + "type": "inline_equation", + "content": "3 + 14 = 17" + }, + { + "bbox": [ + 147, + 437, + 365, + 447 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 265, + 446, + 345, + 454 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 265, + 446, + 345, + 454 + ], + "spans": [ + { + "bbox": [ + 265, + 446, + 345, + 454 + ], + "type": "text", + "content": "21 more paragraphs here." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 147, + 454, + 457, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 454, + 457, + 471 + ], + "spans": [ + { + "bbox": [ + 147, + 454, + 457, + 471 + ], + "type": "text", + "content": "Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "spans": [ + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "text", + "content": "*Final Answer**" + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "inline_equation", + "content": "\\boxed{63} <" + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "text", + "content": " /think> To determine the value of " + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "inline_equation", + "content": "((\\gamma) + \\Xi)" + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "text", + "content": ", we need to consider the possible meanings of the symbols " + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "inline_equation", + "content": "(\\Xi)" + }, + { + "bbox": [ + 147, + 471, + 457, + 490 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "spans": [ + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "text", + "content": "1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. 
The letter " + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "text", + "content": " (gamma) represents the value 3, and the letter " + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "inline_equation", + "content": "(\\Xi)" + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "text", + "content": " (Xi) represents the value 60.2. **Double Parentheses**: The notation " + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "inline_equation", + "content": "((\\gamma))" + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "text", + "content": " might be a red herring or simply emphasize that " + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "text", + "content": " is being used as a numeral. The parentheses do not change the numerical value of " + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 490, + 457, + 522 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 147, + 522, + 415, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 522, + 415, + 531 + ], + "spans": [ + { + "bbox": [ + 147, + 522, + 415, + 531 + ], + "type": "text", + "content": "Combining these values, we add the numerical values of " + }, + { + "bbox": [ + 147, + 522, + 415, + 531 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 147, + 522, + 415, + 531 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 147, + 522, + 415, + 531 + ], + "type": "inline_equation", + "content": "(\\Xi): - (\\gamma = 3) - (\\Xi = 60)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 147, + 531, + 282, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 531, + 282, + 540 + ], + "spans": [ + { + "bbox": [ + 147, + 531, + 282, + 540 + ], + "type": "text", + "content": "Thus, the value of " + }, + { + "bbox": [ + 147, + 531, + 282, + 540 + ], + "type": "inline_equation", + "content": "((\\gamma) + \\Xi)" + }, + { + "bbox": [ + 147, + 531, + 282, + 540 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 147, + 531, + 282, + 540 + ], + "type": "inline_equation", + "content": "(3 + 60 = 63)" + }, + { + "bbox": [ + 147, + 531, + 282, + 540 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 147, + 540, + 162, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 540, + 162, + 552 + ], + "spans": [ + { + "bbox": [ + 147, + 540, + 162, + 552 + ], + "type": "text", + "content": "63" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "text", + "content": "Figure 4: An example of reasoning model (s1.1-32B) response to a MiP question. 
The response exhibits five distinct thinking patterns, highlighted in different colors: " + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "inline_equation", + "content": "①" + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "text", + "content": " Revisit Question (yellow), where the model reexamines the original query; " + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "inline_equation", + "content": "②" + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "text", + "content": " Visit Knowledge (red), where the model accesses domain-specific knowledge; " + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "inline_equation", + "content": "③" + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "text", + "content": " Propose Assumption (blue), where the model proposes and investigates various hypotheses; " + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "inline_equation", + "content": "④" + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "text", + "content": " Self Doubt (green), where the model questions its own reasoning and expresses uncertainty; and " + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "inline_equation", + "content": "⑤" + }, + { + "bbox": [ + 104, + 578, + 506, + 668 + ], + "type": "text", + "content": " Pause/Check (purple), where the model pauses to review previous steps. These patterns demonstrate the model's complex but potentially inefficient reasoning process when confronted with missing premises." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "models suspecting the solvability of most of the given MiP questions, they only abstain a very small proportion of them." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 80, + 504, + 118 + ], + "blocks": [ + { + "bbox": [ + 106, + 80, + 504, + 118 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 504, + 118 + ], + "type": "table", + "html": "
ModelMiP-FormulaMiP-GSMR
DeepSeek-R1DS-QwenQwQS1.1DeepSeek-R1DS-QwenQwQS1.1
In-Process Suspicion Rate100%100%100%100%95.5%83.3%99.6%100%
In-Process First Suspicion Index1.321.361.421.162.013.901.771.61
", + "image_path": "b0eb5bd600feb6c67c46af1fd396f708d068f29cdc078da243ad8fd54b7c58d6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 125, + 506, + 204 + ], + "lines": [ + { + "bbox": [ + 104, + 125, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 125, + 506, + 204 + ], + "type": "text", + "content": "Table 4: The in-process insufficiency suspicion information across different reasoning models on MiP-Formula and MiP-GSMR datasets. The in-process insufficiency suspicion is defined as when the reasoning model suspects the given question is unsolvable during its thinking process. In-Process Suspicion Rate represents how many percent of the samples trigger the in-process suspicion. First Suspicion Index is the averaged step index where the model first suspects the question's validity. Most reasoning models do notice the existence of MiP at the very early steps, but they still suffer from low abstain rate and cannot confidently stop the thinking." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 504, + 255 + ], + "type": "text", + "content": "Based on the above observations, we conclude that reasoning models actually have the capabilities to find out that the given MiP question is not solvable, but they do not \"dare\" to abstain it. Thus, our MiP-Overthinking issue indicates the lack of critical thinking abilities of reasoning models." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 129, + 266, + 261, + 351 + ], + "blocks": [ + { + "bbox": [ + 129, + 266, + 261, + 351 + ], + "lines": [ + { + "bbox": [ + 129, + 266, + 261, + 351 + ], + "spans": [ + { + "bbox": [ + 129, + 266, + 261, + 351 + ], + "type": "image", + "image_path": "c83596a05fe1522cb9643b089156b04d06e178725b5057f3f7f9e1e879418858.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 354, + 504, + 421 + ], + "lines": [ + { + "bbox": [ + 104, + 354, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 504, + 421 + ], + "type": "text", + "content": "Figure 5: The transition flow between in-process suspicion of MiP and the final successful abstention on different reasoning models. For each Sankey diagram, the left bars represent whether the model suspects the given question is unsolvable during its thinking process, i.e., Suspected or Unsuspected; the right bars represent the final abstention, categorized into Abstain (preferred) or Non-abstain. Most existing reasoning models have suspected that the given question might be unsolvable, but only for a very small portion, the models insist on their suspicion." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 268, + 275, + 338, + 350 + ], + "blocks": [ + { + "bbox": [ + 268, + 275, + 338, + 350 + ], + "lines": [ + { + "bbox": [ + 268, + 275, + 338, + 350 + ], + "spans": [ + { + "bbox": [ + 268, + 275, + 338, + 350 + ], + "type": "image", + "image_path": "55f6d6d44e00c345d156ece804a69f49e5675108b6ff23ec22d0988da4230591.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 343, + 275, + 411, + 350 + ], + "blocks": [ + { + "bbox": [ + 343, + 275, + 411, + 350 + ], + "lines": [ + { + "bbox": [ + 343, + 275, + 411, + 350 + ], + "spans": [ + { + "bbox": [ + 343, + 275, + 411, + 350 + ], + "type": "image", + "image_path": "c32976e483198128d4ac818aeb93f12c1a8e6bf15bc9101b38bf03bfec679bf4.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 417, + 276, + 485, + 350 + ], + "blocks": [ + { + "bbox": [ + 417, + 276, + 485, + 350 + ], + "lines": [ + { + "bbox": [ + 417, + 276, + 485, + 350 + ], + "spans": [ + { + "bbox": [ + 417, + 276, + 485, + 350 + ], + "type": "image", + "image_path": "21aadeb460d77f548fd46ed66bde9e696ba650f5c7d534d57cd4c6f966643e65.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 425, + 275, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 425, + 275, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 425, + 275, + 437 + ], + "type": "text", + "content": "4.2 What Caused MiP-Overthinking?" 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 450, + 506, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 450, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 104, + 450, + 506, + 495 + ], + "type": "text", + "content": "Figure 2 demonstrates that MiP-Overthinking manifests across both RL-based and SFT-based reasoning models. We hypothesize this phenomenon primarily originates from inadequate length constraints during the rule-based reinforcement learning phase of RL-based models, subsequently propagating to SFT-based models through distillation." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 495, + 317, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 495, + 317, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 495, + 317, + 594 + ], + "type": "text", + "content": "Current RL-based reasoning models predominantly employ rule-based training focused on format and accuracy rewards (Shao et al., 2024; Sui et al., 2025), with some incorporating step or length rewards to promote thorough reasoning (Face, 2025). This approach can lead to reward hacking, where models explore excessive reasoning patterns to achieve correct answers (Aggarwal & Welleck, 2025; Shen et al., 2025; Luo et al., 2025)." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 600, + 318, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 318, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 318, + 731 + ], + "type": "text", + "content": "To demonstrate the transmissibility of this behavior through distillation (Xu et al., 2024), we fine-tune Qwen-2.5-7B-Instruct using small-scale " + }, + { + "bbox": [ + 104, + 600, + 318, + 731 + ], + "type": "inline_equation", + "content": "50\\mathrm{MiP}" + }, + { + "bbox": [ + 104, + 600, + 318, + 731 + ], + "type": "text", + "content": " responses generated by DeepSeek-R1 on the MiP-Formula dataset. 
As shown in Figure 6, the fine-tuned model exhibits clear MiP-Overthinking characteristics when evaluated on GSM8K: significantly increased response lengths for both MiP and well-defined questions, emergence of a length disparity between MiP and well-defined responses previously absent in the original model, and decreased abstain rates." + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 326, + 502, + 521, + 628 + ], + "blocks": [ + { + "bbox": [ + 326, + 502, + 521, + 628 + ], + "lines": [ + { + "bbox": [ + 326, + 502, + 521, + 628 + ], + "spans": [ + { + "bbox": [ + 326, + 502, + 521, + 628 + ], + "type": "image", + "image_path": "286734d33c261af06659ebd9d6ff6b8eed44dbbcfd33d1a2e95adb65f3264213.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 323, + 640, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 323, + 640, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 323, + 640, + 504, + 730 + ], + "type": "text", + "content": "Figure 6: Comparison of response length, abstain rate of MiP, and accuracy of well-defined questions before and after tuning on 50 responses from DeepSeek-R1 on the MiP-Formula dataset. The results demonstrate rapid onset of MiP-Overthinking behavior after exposure to a small number of MiP examples during fine-tuning." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 199, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 199, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 199, + 94 + ], + "type": "text", + "content": "5 Related Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 110, + 278, + 122 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 110, + 278, + 122 + ], + "spans": [ + { + "bbox": [ + 105, + 110, + 278, + 122 + ], + "type": "text", + "content": "5.1 Reasoning Large Language Model" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 133, + 506, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 506, + 212 + ], + "type": "text", + "content": "Recent advances in Large Language Models (LLMs) have sparked significant research interest in enhancing their reasoning capabilities (Ahn et al., 2024; Besta et al., 2025; Chen et al., 2025a). Research has focused on improving these capabilities through various post-training approaches. Several studies have employed reinforcement learning techniques to guide models toward more effective reasoning strategies (Shao et al., 2024; Xiong et al., 2025; Cui et al., 2025). Additionally, researchers have demonstrated that instruction tuning on carefully curated, high-quality datasets can significantly enhance reasoning performance (Ye et al., 2025; Muennighoff et al., 2025)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 216, + 506, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 506, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 506, + 307 + ], + "type": "text", + "content": "While Reasoning Models have demonstrated impressive performance on various benchmarks, recent studies have begun to critically examine the quality and efficiency of their reasoning processes. Xia et al. (2025) conducted a comprehensive analysis of RLMs' reasoning quality, revealing significant redundancy in their solution approaches. Further investigations (Chen et al., 2025b; Cuadron et al., 2025; Qu et al., 2025; Liu et al., 2025) identified a concerning \"overthinking\" phenomenon, where reasoning model generate unnecessarily verbose solutions even for simple problems. Building on these observations, Kumar et al. (2025) demonstrated the potential security implications of this behavior by developing a slowdown attack that exploits overthinking through input perturbation." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 324, + 204, + 337 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 204, + 337 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 204, + 337 + ], + "type": "text", + "content": "5.2 Test-time Scaling" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 348, + 507, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 507, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 507, + 471 + ], + "type": "text", + "content": "In contrast to earlier research on training-time scaling laws (Kaplan et al., 2020), recent literature has increasingly focused on test-time performance scaling strategies, which aim to enhance model performance by optimizing inference-time token generation (Snell et al., 2024; OpenAI, 2024a). 
These approaches can be categorized into several primary methodologies: parallel sampling techniques (Brown et al., 2024; Levi, 2024), which generate multiple candidate responses and select the optimal output; sequential refinement approaches (Snell et al., 2024; Lee et al., 2025), which enable iterative improvement of previous outputs; and tree-based methods (Gandhi et al., 2024; Hou et al., 2025), which combine elements of both parallel and sequential approaches. While the prevailing consensus suggests that increased token generation during inference enhances reasoning capabilities, our investigation reveals a concerning counterpoint: under certain conditions, extended responses can lead to computational inefficiency and, paradoxically, degraded performance outcomes." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 489, + 342, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 489, + 342, + 502 + ], + "spans": [ + { + "bbox": [ + 105, + 489, + 342, + 502 + ], + "type": "text", + "content": "5.3 Models' Behavior Study in Ambiguous Condition" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 513, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 513, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 506, + 581 + ], + "type": "text", + "content": "LLMs are prone to hallucination (Huang et al., 2025; Xu et al., 2025), generating non-existent conditions that compromise trustworthiness. An essential aspect of reliability is the ability to abstain under uncertainty. Prior work (Cole et al., 2023; Amayuelas et al., 2024; Zhou et al., 2023) has proposed benchmarks assessing LLMs' recognition of knowledge limits when facing ambiguous or challenging queries. Different from theirs, our study explores reasoning models under MiP condition. Surprisingly, we find these specialized models exhibit prolonged reasoning and inferior performance." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 597, + 185, + 610 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 597, + 185, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 597, + 185, + 610 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 506, + 733 + ], + "type": "text", + "content": "We introduce the Overthinking under Missing Premise (MiP-Overthinking) issue, which is a widespread but still under-explored phenomenon for current reasoning models. In this phenomenon, when faced with ill-defined unsolvable questions with missing premises, existing models generate dramatically long responses while having very low abstain rates. With systematic investigation of this phenomenon, our findings show that while these models sometimes suspect the given MiP question is not solvable in the early state of the thinking process, they typically fail to act on those suspicions and instead generating repetitive and redundant thinking traces with the final answer that does not address the missing premises, indicating a lack of critical thinking capability. This behavior highlights a pressing gap: current training recipes for reasoning models, which emphasize thorough chains of thought, do not sufficiently reward critical thinking or early exit from unsolvable tasks." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 761 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 165, + 93 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 165, + 93 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 165, + 93 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 100, + 507, + 731 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 100, + 506, + 144 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 100, + 506, + 144 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 506, + 144 + ], + "type": "text", + "content": "Marah Abdin, Jyoti Aneja, Hany Awadalla, Ahmed Awadallah, Ammar Ahmad Awan, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Jianmin Bao, Harkirat Behl, and etc. Phi-3 technical report: A highly capable language model locally on your phone, 2024. URL https://arxiv.org/abs/2404.14219." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 153, + 504, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 153, + 504, + 176 + ], + "spans": [ + { + "bbox": [ + 106, + 153, + 504, + 176 + ], + "type": "text", + "content": "Pranjal Aggarwal and Sean Welleck. L1: Controlling how long a reasoning model thinks with reinforcement learning, 2025. URL https://arxiv.org/abs/2503.04697." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 185, + 507, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 185, + 507, + 251 + ], + "spans": [ + { + "bbox": [ + 106, + 185, + 507, + 251 + ], + "type": "text", + "content": "Janice Ahn, Rishu Verma, Renze Lou, Di Liu, Rui Zhang, and Wenpeng Yin. Large language models for mathematical reasoning: Progresses and challenges. In Neele Falk, Sara Papi, and Mike Zhang (eds.), Proceedings of the 18th Conference of the European Chapter of the Association for Computational Linguistics: Student Research Workshop, pp. 225-237, St. Julian's, Malta, March 2024. Association for Computational Linguistics. URL https://aclanthology.org/2024.eacl-srw.17/." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 260, + 504, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 260, + 504, + 294 + ], + "spans": [ + { + "bbox": [ + 106, + 260, + 504, + 294 + ], + "type": "text", + "content": "Alfonso Amayuelas, Kyle Wong, Liangming Pan, Wenhu Chen, and William Wang. Knowledge of knowledge: Exploring known-unknowns uncertainty with large language models, 2024. URL https://arxiv.org/abs/2305.13712." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 302, + 506, + 358 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 302, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 105, + 302, + 506, + 358 + ], + "type": "text", + "content": "Maciej Besta, Julia Barth, Eric Schreiber, Ales Kubicek, Afonso Catarino, Robert Gerstenberger, Piotr Nczyk, Patrick Iff, Yueling Li, Sam Houliston, Tomasz Sternal, Marcin Copik, Grzegorz Kwaśniewski, Jürgen Müller, Łukasz Flis, Hannes Eberhard, Hubert Niewiadomski, and Torsten Hoefler. Reasoning language models: A blueprint, 2025. URL https://arxiv.org/abs/2501.11223." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 367, + 506, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 367, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 106, + 367, + 506, + 401 + ], + "type": "text", + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V. Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling, 2024. URL https://arxiv.org/abs/2407.21787." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 409, + 507, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 409, + 507, + 453 + ], + "spans": [ + { + "bbox": [ + 106, + 409, + 507, + 453 + ], + "type": "text", + "content": "Qiguang Chen, Libo Qin, Jinhao Liu, Dengyun Peng, Jiannan Guan, Peng Wang, Mengkang Hu, Yuhang Zhou, Te Gao, and Wanxiang Che. Towards reasoning era: A survey of long chain-of-thought for reasoning large language models, 2025a. URL https://arxiv.org/abs/2503.09567." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 462, + 507, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 462, + 507, + 507 + ], + "spans": [ + { + "bbox": [ + 106, + 462, + 507, + 507 + ], + "type": "text", + "content": "Xingyu Chen, Jiahao Xu, Tian Liang, Zhiwei He, Jianhui Pang, Dian Yu, Linfeng Song, Qiuzhi Liu, Mengfei Zhou, Zhuosheng Zhang, Rui Wang, Zhaopeng Tu, Haitao Mi, and Dong Yu. Do not think that much for " + }, + { + "bbox": [ + 106, + 462, + 507, + 507 + ], + "type": "inline_equation", + "content": "2 + 3 = ?" + }, + { + "bbox": [ + 106, + 462, + 507, + 507 + ], + "type": "text", + "content": " on the overthinking of o1-like llms, 2025b. URL https://arxiv.org/abs/2412.21187." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 516, + 507, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 516, + 507, + 559 + ], + "spans": [ + { + "bbox": [ + 106, + 516, + 507, + 559 + ], + "type": "text", + "content": "Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 569, + 507, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 569, + 507, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 569, + 507, + 603 + ], + "type": "text", + "content": "Jeremy R. Cole, Michael J. Q. Zhang, Daniel Gillick, Julian Martin Eisenschlos, Bhuwan Dhingra, and Jacob Eisenstein. Selectively answering ambiguous questions, 2023. URL https://arxiv.org/abs/2305.14613." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 611, + 507, + 667 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 611, + 507, + 667 + ], + "spans": [ + { + "bbox": [ + 106, + 611, + 507, + 667 + ], + "type": "text", + "content": "Alejandro Cuadron, Dacheng Li, Wenjie Ma, Xingyao Wang, Yichuan Wang, Siyuan Zhuang, Shu Liu, Luis Gaspar Schroeder, Tian Xia, Huanzhi Mao, Nicholas Thumiger, Aditya Desai, Ion Stoica, Ana Klimovic, Graham Neubig, and Joseph E. Gonzalez. The danger of overthinking: Examining the reasoning-action dilemma in agentic tasks, 2025. URL https://arxiv.org/abs/2502.08235." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 676, + 507, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 676, + 507, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 676, + 507, + 731 + ], + "type": "text", + "content": "Ganqu Cui, Lifan Yuan, Zefan Wang, Hanbin Wang, Wendi Li, Bingxiang He, Yuchen Fan, Tianyu Yu, Qixin Xu, Weize Chen, Jiarui Yuan, Huayu Chen, Kaiyan Zhang, Xingtai Lv, Shuo Wang, Yuan Yao, Xu Han, Hao Peng, Yu Cheng, Zhiyuan Liu, Maosong Sun, Bowen Zhou, and Ning Ding. Process reinforcement through implicit rewards, 2025. URL https://arxiv.org/abs/2502.01456." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, and etc. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning, 2025. 
URL https://arxiv.org/abs/2501.12948." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 135, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 135, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 135, + 506, + 158 + ], + "type": "text", + "content": "Hugging Face. Open r1: A fully open reproduction of deepseek-r1, January 2025. URL https://github.com/huggingface/open-r1." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 165, + 506, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 165, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 106, + 165, + 506, + 199 + ], + "type": "text", + "content": "Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D. Goodman. Stream of search (sos): Learning to search in language, 2024. URL https://arxiv.org/abs/2404.03683." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 207, + 506, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 207, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 107, + 207, + 506, + 240 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the math dataset. NeurIPS, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 249, + 506, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 249, + 506, + 283 + ], + "spans": [ + { + "bbox": [ + 106, + 249, + 506, + 283 + ], + "type": "text", + "content": "Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling, 2025. URL https://arxiv.org/abs/2501.11651." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 291, + 506, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 291, + 506, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 291, + 506, + 314 + ], + "type": "text", + "content": "Jie Huang and Kevin Chen-Chuan Chang. Towards reasoning in large language models: A survey, 2023. URL https://arxiv.org/abs/2212.10403." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 321, + 506, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 321, + 506, + 378 + ], + "spans": [ + { + "bbox": [ + 106, + 321, + 506, + 378 + ], + "type": "text", + "content": "Lei Huang, Weijiang Yu, Weitao Ma, Weihong Zhong, Zhangyin Feng, Haotian Wang, Qianglong Chen, Weihua Peng, Xiaocheng Feng, Bing Qin, and Ting Liu. A survey on hallucination in large language models: Principles, taxonomy, challenges, and open questions. ACM Transactions on Information Systems, 43(2):1-55, January 2025. ISSN 1558-2868. doi: 10.1145/3703155. URL http://dx.doi.org/10.1145/3703155." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 385, + 506, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 385, + 506, + 419 + ], + "spans": [ + { + "bbox": [ + 107, + 385, + 506, + 419 + ], + "type": "text", + "content": "Jared Kaplan, Sam McCandlish, Tom Henighan, Tom B. Brown, Benjamin Chess, Rewon Child, Scott Gray, Alec Radford, Jeffrey Wu, and Dario Amodei. Scaling laws for neural language models, 2020. URL https://arxiv.org/abs/2001.08361." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 427, + 506, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 427, + 506, + 460 + ], + "spans": [ + { + "bbox": [ + 107, + 427, + 506, + 460 + ], + "type": "text", + "content": "Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. 
Overthink: Slowdown attacks on reasoning llms, 2025. URL https://arxiv.org/abs/2502.02542." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 469, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 469, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 107, + 469, + 506, + 502 + ], + "type": "text", + "content": "Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper llm thinking, 2025. URL https://arxiv.org/abs/2501.09891." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 510, + 504, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 510, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 504, + 533 + ], + "type": "text", + "content": "Noam Levi. A simple model of inference scaling laws, 2024. URL https://arxiv.org/abs/2410.16377." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 541, + 504, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 541, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 107, + 541, + 504, + 565 + ], + "type": "text", + "content": "Ming Li, Yanhong Li, and Tianyi Zhou. What happened in llms layers when trained for fast vs. slow thinking: A gradient perspective. arXiv preprint arXiv:2410.23743, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 572, + 506, + 606 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 572, + 506, + 606 + ], + "spans": [ + { + "bbox": [ + 107, + 572, + 506, + 606 + ], + "type": "text", + "content": "Changshu Liu, Shizhuo Dylan Zhang, Ali Reza Ibrahimzada, and Reyhaneh Jabbarvand. Codemind: A framework to challenge large language models for code reasoning, 2024. URL https:// arxiv.org/abs/2402.09664." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 614, + 506, + 648 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 614, + 506, + 648 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 506, + 648 + ], + "type": "text", + "content": "Yue Liu, Jiaying Wu, Yufei He, Hongcheng Gao, Hongyu Chen, Baolong Bi, Jiaheng Zhang, Zhiqi Huang, and Bryan Hooi. Efficient inference for large reasoning models: A survey, 2025. URL https://arxiv.org/abs/2503.23077." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 656, + 506, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 656, + 506, + 690 + ], + "spans": [ + { + "bbox": [ + 107, + 656, + 506, + 690 + ], + "type": "text", + "content": "Haotian Luo, Li Shen, Haiying He, Yibo Wang, Shiwei Liu, Wei Li, Naiqiang Tan, Xiaochun Cao, and Dacheng Tao. O1-pruner: Length-harmonizing fine-tuning for o1-like reasoning pruning, 2025. URL https://arxiv.org/abs/2501.12570." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 698, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 698, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 698, + 504, + 732 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling, 2025. URL https://arxiv.org/abs/2501.19393." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "OpenAI. Learning to reason with llms, 2024a. URL https://openai.com/index/learning-to-reason-with-llms/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 505, + 136 + ], + "type": "text", + "content": "OpenAI. OpenAI o1 System Card, December 2024b. URL https://cdn.openai.com/ol-system-card-20241205.pdf." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 140, + 505, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 140, + 505, + 166 + ], + "spans": [ + { + "bbox": [ + 105, + 140, + 505, + 166 + ], + "type": "text", + "content": "OpenAI. OpenAI o1-mini System Card, September 2024c. URL https://openai.com/index/openai-o1-mini-advancing-cost-efficient-reasoning/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 171, + 505, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 171, + 505, + 195 + ], + "spans": [ + { + "bbox": [ + 105, + 171, + 505, + 195 + ], + "type": "text", + "content": "OpenAI. OpenAI o3-mini System Card, January 2025. 
URL https://cdn.openai.com/o3-mini-system-card-feb10.pdf." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 201, + 507, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 201, + 507, + 235 + ], + "spans": [ + { + "bbox": [ + 105, + 201, + 507, + 235 + ], + "type": "text", + "content": "OpenAI, Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, and etc. Gpt-4 technical report, 2024. URL https://arxiv.org/abs/2303.08774." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 242, + 507, + 299 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 507, + 299 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 507, + 299 + ], + "type": "text", + "content": "Arkil Patel, Satwik Bhattachamishra, and Navin Goyal. Are NLP models really able to solve simple math word problems? In Proceedings of the 2021 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, pp. 2080-2094, Online, June 2021. Association for Computational Linguistics. doi: 10.18653/v1/2021.naacl-main.168. URL https://aclanthology.org/2021.naacl-main.168." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 304, + 507, + 361 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 304, + 507, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 304, + 507, + 361 + ], + "type": "text", + "content": "Xiaoye Qu, Yafu Li, Zhaochen Su, Weigao Sun, Jianhao Yan, Dongrui Liu, Ganqu Cui, Daizong Liu, Shuxian Liang, Junxian He, Peng Li, Wei Wei, Jing Shao, Chaochao Lu, Yue Zhang, Xian-Sheng Hua, Bowen Zhou, and Yu Cheng. A survey of efficient reasoning for large reasoning models: Language, multimodality, and beyond, 2025. URL https://arxiv.org/abs/2503.21614." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 368, + 507, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 368, + 507, + 435 + ], + "spans": [ + { + "bbox": [ + 105, + 368, + 507, + 435 + ], + "type": "text", + "content": "Nils Reimers and Iryna Gurevych. Sentence-BERT: Sentence embeddings using Siamese BERT-networks. In Kentaro Inui, Jing Jiang, Vincent Ng, and Xiaojun Wan (eds.), Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pp. 3982-3992, Hong Kong, China, November 2019. Association for Computational Linguistics. doi: 10.18653/v1/D19-1410. URL https://aclanthology.org/D19-1410/." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 441, + 507, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 441, + 507, + 486 + ], + "spans": [ + { + "bbox": [ + 105, + 441, + 507, + 486 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, Y. K. Li, Y. Wu, and Daya Guo. Deepseekmath: Pushing the limits of mathematical reasoning in open language models, 2024. URL https://arxiv.org/abs/2402.03300." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 493, + 507, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 493, + 507, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 493, + 507, + 528 + ], + "type": "text", + "content": "Yi Shen, Jian Zhang, Jieyun Huang, Shuming Shi, Wenjing Zhang, Jiangze Yan, Ning Wang, Kai Wang, and Shiguo Lian. Dast: Difficulty-adaptive slow-thinking for large reasoning models, 2025. URL https://arxiv.org/abs/2503.04472." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 534, + 507, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 534, + 507, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 534, + 507, + 567 + ], + "type": "text", + "content": "Charlie Snell, Jaehoon Lee, Kelvin Xu, and Aviral Kumar. Scaling llm test-time compute optimally can be more effective than scaling model parameters, 2024. URL https://arxiv.org/abs/2408.03314." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 574, + 507, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 507, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 507, + 610 + ], + "type": "text", + "content": "Yang Sui, Yu-Neng Chuang, Guanchu Wang, Jiamu Zhang, Tianyi Zhang, Jiayi Yuan, Hongyi Liu, Andrew Wen, Shaochen Zhong, Hanjie Chen, and Xia Hu. Stop overthinking: A survey on efficient reasoning for large language models, 2025. URL https://arxiv.org/abs/2503.16419." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 616, + 507, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 616, + 507, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 616, + 507, + 662 + ], + "type": "text", + "content": "Gemini Team, Petko Georgiev, Ving Ian Lei, Ryan Burnell, Libin Bai, Anmol Gulati, Garrett Tanzer, Damien Vincent, Zhufeng Pan, Shibo Wang, Soroosh Mariooryad, Yifan Ding, Xinyang Geng, and etc. Gemini 1.5: Unlocking multimodal understanding across millions of tokens of context, 2024a. URL https://arxiv.org/abs/2403.05530." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 668, + 507, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 507, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 507, + 702 + ], + "type": "text", + "content": "Gemma Team, Morgane Riviere, Shreya Pathak, Pier Giuseppe Sessa, Cassidy Hardin, Surya Bhupatiraju, and etc. 
Gemma 2: Improving open language models at a practical size, 2024b. URL https://arxiv.org/abs/2408.00118." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 708, + 507, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 507, + 732 + ], + "type": "text", + "content": "Qwen Team. Qwen2.5: A party of foundation models, September 2024. URL https://qwenlm.github.io/blog/qwen2.5/." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 761 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 388 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 505, + 106 + ], + "type": "text", + "content": "Qwen Team. Qwq-32b: Embracing the power of reinforcement learning, March 2025. URL https://qwenlm.github.io/blog/qwq-32b/." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 111, + 506, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 111, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 111, + 506, + 146 + ], + "type": "text", + "content": "Yaojing Wang, Shengqiong Wu, Yuecheng Zhang, Shuicheng Yan, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey, 2025. URL https://arxiv.org/abs/2503.12605." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "spans": [ + { + "bbox": [ + 105, + 152, + 504, + 177 + ], + "type": "text", + "content": "Yuqing Wang and Yun Zhao. Gemini in reasoning: Unveiling commonsense in multimodal large language models, 2023. URL https://arxiv.org/abs/2312.17661." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 182, + 504, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 182, + 504, + 206 + ], + "spans": [ + { + "bbox": [ + 105, + 182, + 504, + 206 + ], + "type": "text", + "content": "Shijie Xia, Xuefeng Li, Yixin Liu, Tongshuang Wu, and Pengfei Liu. Evaluating mathematical reasoning beyond accuracy, 2025. URL https://arxiv.org/abs/2404.05692." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 212, + 506, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 212, + 506, + 246 + ], + "spans": [ + { + "bbox": [ + 105, + 212, + 506, + 246 + ], + "type": "text", + "content": "Wei Xiong, Hanning Zhang, Chenlu Ye, Lichang Chen, Nan Jiang, and Tong Zhang. Self-rewarding correction for mathematical reasoning, 2025. URL https://arxiv.org/abs/2502.19613." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 252, + 504, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 252, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 105, + 252, + 504, + 288 + ], + "type": "text", + "content": "Xiaohan Xu, Ming Li, Chongyang Tao, Tao Shen, Reynold Cheng, Jinyang Li, Can Xu, Dacheng Tao, and Tianyi Zhou. A survey on knowledge distillation of large language models, 2024. URL https://arxiv.org/abs/2402.13116." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 293, + 504, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 293, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 105, + 293, + 504, + 319 + ], + "type": "text", + "content": "Ziwei Xu, Sanjay Jain, and Mohan Kankanhalli. Hallucination is inevitable: An innate limitation of large language models, 2025. URL https://arxiv.org/abs/2401.11817." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 105, + 323, + 504, + 348 + ], + "type": "text", + "content": "Yixin Ye, Zhen Huang, Yang Xiao, Ethan Chern, Shijie Xia, and Pengfei Liu. Limo: Less is more for reasoning, 2025. URL https://arxiv.org/abs/2502.03387." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 354, + 506, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 354, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 354, + 506, + 388 + ], + "type": "text", + "content": "Kaitlyn Zhou, Dan Jurafsky, and Tatsunori Hashimoto. Navigating the grey area: How expressions of uncertainty and overconfidence affect language models, 2023. URL https://arxiv.org/abs/2302.13439." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 202, + 38 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 269, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 269, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 269, + 95 + ], + "type": "text", + "content": "Table of Contents for Appendix" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 109, + 505, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 109, + 505, + 121 + ], + "spans": [ + { + "bbox": [ + 107, + 109, + 505, + 121 + ], + "type": "text", + "content": "A Detailed Experimental Setup 17" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 126, + 504, + 171 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 121, + 126, + 504, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 126, + 504, + 137 + ], + "spans": [ + { + "bbox": [ + 121, + 126, + 504, + 137 + ], + "type": "text", + "content": "A.1 Models 17" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 143, + 504, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 143, + 504, + 154 + ], + "spans": [ + { + "bbox": [ + 121, + 143, + 504, + 154 + ], + "type": "text", + "content": "A.2 Evaluation Metrics 
17" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 159, + 504, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 159, + 504, + 171 + ], + "spans": [ + { + "bbox": [ + 121, + 159, + 504, + 171 + ], + "type": "text", + "content": "A.3 Generation Setting 17" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 106, + 186, + 504, + 252 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 106, + 186, + 504, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 504, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 504, + 198 + ], + "type": "text", + "content": "B Data Construction Details 17" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 213, + 504, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 213, + 504, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 504, + 225 + ], + "type": "text", + "content": "C Prompt Template for Evaluation 18" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 240, + 504, + 252 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 240, + 504, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 240, + 504, + 252 + ], + "type": "text", + "content": "D Examples of Model Response 19" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 201, + 37 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 750, + 310, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 276, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 276, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 276, + 95 + ], + "type": "text", + "content": "A Detailed Experimental Setup" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 106, + 164, + 118 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 164, + 118 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 164, + 118 + ], + "type": "text", + "content": "A.1 Models" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 127, + 506, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 127, + 506, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 127, + 506, + 205 + ], + "type": "text", + "content": "We leverage a series of non-reasoning and reasoning model for our study, from both open-source and proprietary source with different training recipes. The non-reasoning models we use include Qwen2.5-32B-Instruct Team (2024), Gemma-2-27B-it Team et al. (2024b), Phi-3-medium-128k Abdin et al. (2024), GPT-4o OpenAI et al. (2024) and Gemini1.5 Team et al. (2024a). The reasoning models we use are QwQ-32B Team (2025), DeepSeek-R1-Distill-Qwen-32B DeepSeek-AI et al. (2025), S1.1 Muennighoff et al. (2025), DeepSeek-R1 DeepSeek-AI et al. (2025), GPT-o1 OpenAI (2024b), GPT-o1mini OpenAI (2024c) and GPT-o3mini OpenAI (2025)." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 219, + 215, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 215, + 230 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 215, + 230 + ], + "type": "text", + "content": "A.2 Evaluation Metrics" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 239, + 506, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 239, + 506, + 328 + ], + "spans": [ + { + "bbox": [ + 104, + 239, + 506, + 328 + ], + "type": "text", + "content": "In Section 3.2, we measure response length by considering both reasoning and answer components. For open-source models, we employ model-specific tokenizers to calculate token counts, while for proprietary models, we obtain generation lengths via their APIs. To determine abstain rates, we parse responses by paragraphs (delimited by " + }, + { + "bbox": [ + 104, + 239, + 506, + 328 + ], + "type": "inline_equation", + "content": "\\backslash \\mathrm{n} \\backslash \\mathrm{n}^{\\prime}" + }, + { + "bbox": [ + 104, + 239, + 506, + 328 + ], + "type": "text", + "content": ") and analyze the final two paragraphs as the model's conclusion. These conclusions, along with reference answers when available, are evaluated by GPT-4o to assess whether the model provides a definitive answer or abstains. For data sets with reference answers (GSM8K and MATH), GPT-4o also evaluates the correctness of the response. The prompt we use for evaluation can be found in Appendix C." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 342, + 214, + 355 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 342, + 214, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 342, + 214, + 355 + ], + "type": "text", + "content": "A.3 Generation Setting" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "content": "For all open-source models, we employ greedy decoding and utilize the default chat template specific to each model. We deliberately omit system prompts prior to posing questions to maintain consistency across evaluations. For proprietary models, we adhere to their default parameter configurations as provided by their respective APIs. In the case of GPT-o1mini and GPT-o3mini, we configure the 'reasoning_effort' parameter to the medium setting by default." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 436, + 262, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 436, + 262, + 449 + ], + "spans": [ + { + "bbox": [ + 105, + 436, + 262, + 449 + ], + "type": "text", + "content": "B Data Construction Details" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 461, + 506, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 506, + 506 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 506, + 506 + ], + "type": "text", + "content": "To systematically investigate this MiP-Overthinking issue, we construct a suite of MiP questions in a controllable manner. Our MiP questions are sourced from 3 math datasets across different qualities, including SVAMP, GSM8K, and MATH 500. In addition, we also construct a synthetic dataset, rule-based Formula, for evaluation." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": "MiP-Formula. We construct a dataset of 50 synthetic unsolvable formulas in a rule-based manner. The formulas are generated recursively through a combination of variables and operators, with a maximum recursion depth of three. The variable set comprises numerical values, Latin letters, and Greek symbols. The operator set includes arithmetic operators " + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "inline_equation", + "content": "(+, -)" + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": ", set operators " + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "inline_equation", + "content": "(\\cup, \\supset)" + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": ", mathematical functions " + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "inline_equation", + "content": "(\\sin, \\sqrt{\\pi})" + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": ", and construct operators " + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "inline_equation", + "content": "(\\sum, \\nabla)" + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": ". 
To ensure the formulas are fundamentally unsolvable, we enforce the inclusion of at least one unassigned variable in each formula, excluding commonly recognized mathematical or physical constants such as " + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "inline_equation", + "content": "e^{\\prime}" + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "inline_equation", + "content": "\\pi^{\\prime}" + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "inline_equation", + "content": "g^{\\prime}" + }, + { + "bbox": [ + 104, + 511, + 506, + 611 + ], + "type": "text", + "content": ". While these formulas may appear complex at a glance, their unsolvability should be immediately apparent due to the presence of undefined variables." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 616, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 504, + 704 + ], + "type": "text", + "content": "MiP-SVAMP. We utilize SVAMP (Patel et al., 2021), a benchmark dataset comprising 1,000 elementary-school-level mathematical word problems, where each instance consists of a problem body and an associated question. The MiP questions can be generated by randomly permuting the problem bodies and associated questions. To maintain dataset integrity, we manually select 300 permuted questions after a thorough human evaluation to eliminate any inadvertently solvable questions that may exist. The resulting problems contain clear logical inconsistencies between their body and question components, making their unsolvability readily apparent without additional context." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 733 + ], + "type": "text", + "content": "MiP-GSM8K. We further utilize GSM8K (Cobbe et al., 2021), a grade school mathematics dataset that presents more complex challenges compared to SVAMP. The questions in GSM8K typically" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 160 + ], + "type": "text", + "content": "contain multiple numerical conditions and require certain reasoning capabilities to arrive at solutions. The MiP question can be constructed by randomly removing a necessary premise from the original solvable question. We first identify the questions containing two or three numerical conditions and then randomly eliminate one numerical condition per question. Subsequently, a thorough human verification is conducted to filter out those questions that are still solvable in some way and finally obtain 582 MiP questions. 
Compared with previous MiP questions, questions from this source require the basic logical analysis of models to identify that the question is unsolvable." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 165, + 506, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 165, + 506, + 244 + ], + "spans": [ + { + "bbox": [ + 104, + 165, + 506, + 244 + ], + "type": "text", + "content": "MiP-MATH. For the MATH dataset (Hendrycks et al., 2021), which comprises challenging competition-level mathematical questions, it is hard to build a rule-based filtering mechanism before human evaluation. Thus, we directly read through all the questions in MATH500 and manually select 58 questions that are feasible for constructing the MiP questions and remove one necessary premise from the question. Due to the sophisticated nature of this data source, identifying the insufficiency of these instances requires substantial mathematical reasoning capabilities, testing models' ability to recognize unsolvability in complex mathematical contexts." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 260, + 295, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 260, + 295, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 260, + 295, + 274 + ], + "type": "text", + "content": "C Prompt Template for Evaluation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 286, + 504, + 309 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 504, + 309 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 504, + 309 + ], + "type": "text", + "content": "As we need LLM-as-a-judge to evaluate the open-end generations of the models in various experiments in this study, in this section we showcase the prompt template we use for each kind of evaluation." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 314, + 506, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 314, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 314, + 506, + 360 + ], + "type": "text", + "content": "For the evaluation of the models' answer accuracy and abstain rate, we adopt the following prompt templates designed for 'paired' and 'non-paired' data, respectively. As we observe that some models, for example Gemma-2-27B-IT, often output an additional " + }, + { + "bbox": [ + 104, + 314, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\backslash n\\backslash n" + }, + { + "bbox": [ + 104, + 314, + 506, + 360 + ], + "type": "text", + "content": " at the end of response, we take the last two paragraph segmented by " + }, + { + "bbox": [ + 104, + 314, + 506, + 360 + ], + "type": "inline_equation", + "content": "\\backslash n\\backslash n" + }, + { + "bbox": [ + 104, + 314, + 506, + 360 + ], + "type": "text", + "content": " to avoid pasing in an empty string." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 160, + 373, + 449, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 160, + 373, + 449, + 385 + ], + "spans": [ + { + "bbox": [ + 160, + 373, + 449, + 385 + ], + "type": "text", + "content": "Prompt Template for Response Evaluation of Formula and SVAMP" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 117, + 393, + 492, + 444 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 393, + 492, + 444 + ], + "spans": [ + { + "bbox": [ + 117, + 393, + 492, + 444 + ], + "type": "text", + "content": "You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the answer explicitly says that the condition of the question is insufficient, you should return 0. 
If the model provides an answer that is a number or formula with variables, you should return 1. Please only return the number, no other text." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 118, + 444, + 257, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 444, + 257, + 455 + ], + "spans": [ + { + "bbox": [ + 118, + 444, + 257, + 455 + ], + "type": "text", + "content": "Model answer: [model_answer_short]" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 480, + 504, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 504, + 515 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 504, + 515 + ], + "type": "text", + "content": "Figure 7: The prompt we use to evaluate the accuracy and abstain rate of the model on Formula and SVAMP. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 163, + 522, + 447, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 522, + 447, + 534 + ], + "spans": [ + { + "bbox": [ + 163, + 522, + 447, + 534 + ], + "type": "text", + "content": "Prompt Template for Response Evaluation of GSM8K and MATH" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 117, + 543, + 492, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 543, + 492, + 594 + ], + "spans": [ + { + "bbox": [ + 117, + 543, + 492, + 594 + ], + "type": "text", + "content": "You are a helpful assistant that evaluates the quality of a model's answer. You will be given a question and a model's answer. You need to evaluate the correctness of the model's answer. If the model output says that the condition of the question is insufficient, you should return 0. Otherwise, if the model gives a clear answer and matches the reference answer, you should return 1. 
If the model's answer does not match the reference answer, you should return 2. Please only return the number, no other text." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 594, + 257, + 604 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 594, + 257, + 604 + ], + "spans": [ + { + "bbox": [ + 118, + 594, + 257, + 604 + ], + "type": "text", + "content": "Model answer: [model_answer_short]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 604, + 259, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 604, + 259, + 614 + ], + "spans": [ + { + "bbox": [ + 118, + 604, + 259, + 614 + ], + "type": "text", + "content": "Reference answer: [reference_answer]" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 640, + 504, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 640, + 504, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 640, + 504, + 674 + ], + "type": "text", + "content": "Figure 8: The prompt we use to evaluate the accuracy and abstain rate of the model on GSM8K and MATH. [model_answer_short] is the last two paragraphs of the model answer and [reference_answer] is the answer for the original dataset." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 687, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 723 + ], + "type": "text", + "content": "We use the prompt template in Figure 9 to find the first paragraph that the model suspected a missing premise. We pass in the response sequentially by paragraph until the GPT-4o give a positive response. In practice we find it is not very stable, so we repeat this process for 3 times and use the median value." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 300, + 751, + 310, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 164, + 83, + 447, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 83, + 447, + 95 + ], + "spans": [ + { + "bbox": [ + 164, + 83, + 447, + 95 + ], + "type": "text", + "content": "Prompt Template for Response Evaluation of GSM8K and MATH" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 102, + 492, + 123 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 102, + 492, + 123 + ], + "spans": [ + { + "bbox": [ + 118, + 102, + 492, + 123 + ], + "type": "text", + "content": "You are a helpful assistant. You will be given a paragraph which is part of the answer to a question. You need to identify if the paragraph doubt that the answer depends on some other unspecified condition." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 118, + 123, + 205, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 123, + 205, + 133 + ], + "spans": [ + { + "bbox": [ + 118, + 123, + 205, + 133 + ], + "type": "text", + "content": "Paragraph: [paragraph]" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 134, + 233, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 134, + 233, + 144 + ], + "spans": [ + { + "bbox": [ + 118, + 134, + 233, + 144 + ], + "type": "text", + "content": "Answer in one word, yes or no." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "text", + "content": "Figure 9: The prompt we use to judge if the model suspect there is a missing premise in the response paragraph. [paragraph] is the part of the model response spited by " + }, + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "inline_equation", + "content": "\\backslash n\\backslash n" + } + ] + } + ], + "index": 5, + "type": "text" + }, + { + "bbox": [ + 105, + 221, + 279, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 221, + 279, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 221, + 279, + 236 + ], + "type": "text", + "content": "D Examples of Model Response" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 246, + 506, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 246, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 246, + 506, + 335 + ], + "type": "text", + "content": "In this section, we present some examples of the model response of both non-reasoning and reasoning model on MiP data. As we can see from Figure 10 and Figure 11, the non-reasoning models soon identify the missing premise issue of the question. 
They either abstain from answering the question, as in Figure 10, or friendly invite the user to provide more information. However, as we can see from Figure 11 and Figure 13, reasoning models generate extremely verbose answers on these two apparently premise missing problems. What is worse, they fail to abstain to answer the question. The response in Figure 11 arrives at an absurd answer, and the model in Figure 13 generates a hallucinated answer based on its assumption rather than provided information." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 170, + 347, + 441, + 360 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 347, + 441, + 360 + ], + "spans": [ + { + "bbox": [ + 170, + 347, + 441, + 360 + ], + "type": "text", + "content": "Model Response for a MiP-Formula Question from Gemini-1.5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 118, + 368, + 279, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 368, + 279, + 379 + ], + "spans": [ + { + "bbox": [ + 118, + 368, + 279, + 379 + ], + "type": "text", + "content": "Question: What is the value of " + }, + { + "bbox": [ + 118, + 368, + 279, + 379 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 119, + 379, + 187, + 388 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 379, + 187, + 388 + ], + "spans": [ + { + "bbox": [ + 119, + 379, + 187, + 388 + ], + "type": "text", + "content": "Model Response:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 118, + 388, + 492, + 408 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 388, + 492, + 408 + ], + "spans": [ + { + "bbox": [ + 118, + 388, + 492, + 408 + ], + "type": "text", + "content": "The expression " + }, + { + "bbox": [ + 118, + 388, + 492, + 408 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi" + }, + { + "bbox": [ + 118, + 388, + 492, + 408 + 
], + "type": "text", + "content": " involves two symbols that need to be defined to determine the value of the expression." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 408, + 492, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 408, + 492, + 437 + ], + "spans": [ + { + "bbox": [ + 118, + 408, + 492, + 437 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 118, + 408, + 492, + 437 + ], + "type": "text", + "content": " is the Euler-Mascheroni constant, approximately equal to 0.5772156649. The double parentheses notation " + }, + { + "bbox": [ + 118, + 408, + 492, + 437 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 118, + 408, + 492, + 437 + ], + "type": "text", + "content": " is not a standard mathematical notation. Without further definition of this notation, we cannot evaluate this term." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "spans": [ + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "type": "text", + "content": " is the Riemann Xi function. It is related to the Riemann zeta function, but the notation " + }, + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "type": "text", + "content": " itself isn't a specific numerical value. The Riemann Xi function is defined as a function of a complex variable, not a constant. So " + }, + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 438, + 492, + 479 + ], + "type": "text", + "content": " by itself doesn't have a specific numerical value unless a specific value is substituted for its argument." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "spans": [ + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "text", + "content": "Therefore, we cannot determine the value of " + }, + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi" + }, + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "text", + "content": " without knowing what " + }, + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "inline_equation", + "content": "((\\gamma))" + }, + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "text", + "content": " and the argument of " + }, + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 479, + 492, + 499 + ], + "type": "text", + "content": " represent. The expression is incomplete without definitions for these symbols." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 119, + 499, + 292, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 499, + 292, + 515 + ], + "spans": [ + { + "bbox": [ + 119, + 499, + 292, + 515 + ], + "type": "text", + "content": "Final Answer: The final answer is undefined" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 542, + 504, + 565 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 504, + 565 + ], + "type": "text", + "content": "Figure 10: An example of model response from Gemini_1.5 on MiP-Formula dataset. The model quickly identify the missing premise and abstain to answer." 
+ } + ] + } + ], + "index": 16, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 179, + 313, + 432, + 326 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 313, + 432, + 326 + ], + "spans": [ + { + "bbox": [ + 179, + 313, + 432, + 326 + ], + "type": "text", + "content": "Model Response for a MiP-GSM8k Question from GPT-40" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 334, + 492, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 334, + 492, + 346 + ], + "spans": [ + { + "bbox": [ + 118, + 334, + 492, + 346 + ], + "type": "text", + "content": "Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 346, + 187, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 346, + 187, + 354 + ], + "spans": [ + { + "bbox": [ + 119, + 346, + 187, + 354 + ], + "type": "text", + "content": "Model Response:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 354, + 491, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 354, + 491, + 384 + ], + "spans": [ + { + "bbox": [ + 118, + 354, + 491, + 384 + ], + "type": "text", + "content": "To determine the original price of the book, we generally need to know the discount or markdown percentage applied to calculate its selling price of " + }, + { + "bbox": [ + 118, + 354, + 491, + 384 + ], + "type": "inline_equation", + "content": "19.50. If Kyle bought the book for" + }, + { + "bbox": [ + 118, + 354, + 491, + 384 + ], + "type": "text", + "content": "19.50 and we know it was discounted, we can use the following formula to find out the original price:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 183, + 394, + 426, + 405 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 394, + 426, + 405 + ], + "spans": [ + { + "bbox": [ + 183, + 394, + 426, + 405 + ], + "type": "interline_equation", + "content": "S e l l i n g P r i c e = \\text {O r i g i n a l P r i c e} - (\\text {O r i g i n a l P r i c e} \\times \\text {D i s c o u n t R a t e})", + "image_path": "e125f721d46e8e08999101331132b083a61090681b41dae80678cdac1c749666.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 118, + 418, + 491, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 418, + 491, + 438 + ], + "spans": [ + { + "bbox": [ + 118, + 418, + 491, + 438 + ], + "type": "text", + "content": "Without additional information about discounts, promotions, or markdown rates applied, it's not possible to precisely determine the original price solely from the amount Kyle 
paid." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 119, + 438, + 454, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 438, + 454, + 449 + ], + "spans": [ + { + "bbox": [ + 119, + 438, + 454, + 449 + ], + "type": "text", + "content": "If you provide the discount percentage or rate, I can help calculate the original price for you!" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "type": "text", + "content": "Figure 11: An example of model response from GPT-4o on MiP-GSM8k dataset. The model quickly identify the missing premise and ask the user for more information." + } + ] + } + ], + "index": 8, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 176, + 107, + 434, + 119 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 107, + 434, + 119 + ], + "spans": [ + { + "bbox": [ + 176, + 107, + 434, + 119 + ], + "type": "text", + "content": "Model Response for a MiP-Formula Question from s1.1-32B" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 128, + 278, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 128, + 278, + 140 + ], + "spans": [ + { + "bbox": [ + 118, + 128, + 278, + 140 + ], + "type": "text", + "content": "Question: What is the value of " + }, + { + "bbox": [ + 118, + 128, + 278, + 140 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 140, + 187, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 140, + 187, + 148 + ], + "spans": [ + { + "bbox": [ + 119, + 140, + 187, + 148 + ], + "type": "text", + "content": "Model Response:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 148, + 490, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 148, + 490, + 189 + ], + "spans": [ + { + "bbox": [ + 118, + 148, + 490, + 189 + ], + "type": "text", + "content": "Okay, so I need to figure out the value of " + }, + { + "bbox": [ + 118, + 148, + 490, + 189 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi" + }, + { + "bbox": [ + 118, + 148, + 490, + 189 + ], + "type": "text", + "content": ". Hmm, first off, I should probably clarify what these symbols represent. 
The problem uses double parentheses around gamma, so " + }, + { + "bbox": [ + 118, + 148, + 490, + 189 + ], + "type": "inline_equation", + "content": "((\\gamma))" + }, + { + "bbox": [ + 118, + 148, + 490, + 189 + ], + "type": "text", + "content": ", and then the Greek letter Xi (Ξ). I wonder if these are standard mathematical symbols or if they're defined in a specific context that I might not be aware of." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "spans": [ + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "text", + "content": "Let me start by recalling what I know about gamma. In mathematics, " + }, + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "text", + "content": " often refers to the Euler-Mascheroni constant, which is approximately 0.5772. It's a important constant in number theory and analysis, related to the harmonic series and the gamma function. But here, the notation is " + }, + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "inline_equation", + "content": "((\\gamma))" + }, + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "text", + "content": ". The double parentheses could mean something specific. Maybe it's a notation from a particular field or textbook? For example, in some contexts, double parentheses might denote an interval or a floor function, but usually, floor is denoted by brackets like " + }, + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 118, + 189, + 491, + 258 + ], + "type": "text", + "content": ". Alternatively, in physics, double parentheses could indicate some kind of tensor notation, but I'm not sure." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 118, + 258, + 491, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 258, + 491, + 317 + ], + "spans": [ + { + "bbox": [ + 118, + 258, + 491, + 317 + ], + "type": "text", + "content": "Then there's the Xi (Ξ). Xi is another Greek letter, but in mathematics, Ξ could stand for various things. In set theory, sometimes Xi is used as a variable for ordinals, or in other areas like particle physics, Ξ represents certain particles. But again, without context, it's hard to say. Maybe it's a function or a constant defined in the problem's context? Wait, the question is just asking for the value of " + }, + { + "bbox": [ + 118, + 258, + 491, + 317 + ], + "type": "inline_equation", + "content": "((\\gamma)) + \\Xi" + }, + { + "bbox": [ + 118, + 258, + 491, + 317 + ], + "type": "text", + "content": " without any additional information. That makes me think these symbols might be part of a puzzle or a play on notation rather than standard mathematical constants." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "spans": [ + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "text", + "content": "Let me consider if this could be a trick question. The double parentheses around gamma((" + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "text", + "content": ") ) maybe that's referring to the number of letters in the word \"gamma\" or something like that? Let's see: Gamma is spelled G-A-M-M-A, which is 5 letters. So ((" + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "text", + "content": ")) could be 5. 
Then " + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "text", + "content": " is the Greek letter Xi, which is the 14th letter of the Greek alphabet. So maybe " + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "text", + "content": " represents 14? Then " + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "inline_equation", + "content": "5 + 14" + }, + { + "bbox": [ + 118, + 318, + 491, + 377 + ], + "type": "text", + "content": " would be 19. Is that possible? But is there a standard where Greek letters are assigned numerical values like Roman numerals? Wait, in Greek numerals, each letter does have a numerical value. Let me check that." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 378, + 364, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 378, + 364, + 388 + ], + "spans": [ + { + "bbox": [ + 119, + 378, + 364, + 388 + ], + "type": "text", + "content": "In the Greek numeral system, letters are assigned values as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 256, + 389, + 353, + 398 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 389, + 353, + 398 + ], + "spans": [ + { + "bbox": [ + 256, + 389, + 353, + 398 + ], + "type": "text", + "content": ".67 more paragraphs here." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 118, + 398, + 490, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 398, + 490, + 418 + ], + "spans": [ + { + "bbox": [ + 118, + 398, + 490, + 418 + ], + "type": "text", + "content": "But just to check another angle: In the context of APL programming language, certain symbols have specific meanings, but I don't think that's relevant here." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "spans": [ + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "text", + "content": "Alternatively, in chemistry, " + }, + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "text", + "content": " is used to denote surface tension, and " + }, + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "text", + "content": " is not a standard symbol. In engineering, " + }, + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 118, + 418, + 490, + 437 + ], + "type": "text", + "content": " is sometimes the shear strain. But again, not relevant." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 437, + 490, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 437, + 490, + 456 + ], + "spans": [ + { + "bbox": [ + 118, + 437, + 490, + 456 + ], + "type": "text", + "content": "Alternatively, in statistics, gamma is a distribution, and Xi could be a parameter, but again, without context." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 118, + 457, + 490, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 457, + 490, + 477 + ], + "spans": [ + { + "bbox": [ + 118, + 457, + 490, + 477 + ], + "type": "text", + "content": "Alternatively, the symbols might be from a specific equation in a paper or book, but since I don't have that reference, I can't confirm." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "spans": [ + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "text", + "content": "Given all these considerations, and trying to think of what the answer might be expected to be in a problem like this, it's likely either 17 or 63. If the question is from a math competition or puzzle, they might be expecting the Greek numeral values. For example, in the Greek numeral system, " + }, + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "text", + "content": " is 3 and " + }, + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "inline_equation", + "content": "\\Xi" + }, + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "text", + "content": " is 60, so " + }, + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "inline_equation", + "content": "3 + 60 = 63" + }, + { + "bbox": [ + 118, + 478, + 491, + 517 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 517, + 380, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 517, + 380, + 528 + ], + "spans": [ + { + "bbox": [ + 118, + 517, + 380, + 528 + ], + "type": "text", + "content": "Alternatively, if it's based on their positions in the alphabet, " + }, + { + "bbox": [ + 118, + 517, + 380, + 528 + ], + "type": "inline_equation", + "content": "3 + 14 = 17" + }, + { + "bbox": [ + 118, + 517, + 380, + 528 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 256, + 529, + 353, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 256, + 529, + 353, + 537 + ], + "spans": [ + { + "bbox": [ + 256, + 529, + 353, + 537 + ], + "type": "text", + "content": ".21 more paragraphs here" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 118, + 537, + 491, + 558 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 537, + 491, + 558 + ], + "spans": [ + { + "bbox": [ + 118, + 537, + 491, + 558 + ], + "type": "text", + "content": "Therefore, the answer is 63. The double parentheses around gamma might be a typo or just for decoration, but given the problem's phrasing, interpreting them as Greek numerals seems the most logical." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "spans": [ + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "text", + "content": "*Final Answer**" + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "inline_equation", + "content": "\\boxed{63}" + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "text", + "content": " < /think> To determine the value of " + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "inline_equation", + "content": "((\\gamma) + \\Xi)" + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "text", + "content": ", we need to consider the possible meanings of the symbols " + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "inline_equation", + "content": "(\\Xi)" + }, + { + "bbox": [ + 118, + 559, + 490, + 580 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "spans": [ + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "text", + "content": "1. **Greek Numerals**: In the Greek numeral system, each letter has a specific numerical value. The letter " + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "text", + "content": " (gamma) represents the value 3, and the letter " + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "inline_equation", + "content": "(\\Xi)" + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "text", + "content": " (Xi) represents the value 60. 2. **Double Parentheses**: The notation " + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "inline_equation", + "content": "((\\gamma))" + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "text", + "content": " might be a red herring or simply emphasize that " + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "text", + "content": " is being used as a numeral. The parentheses do not change the numerical value of " + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 118, + 580, + 490, + 620 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "spans": [ + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "type": "text", + "content": "Combining these values, we add the numerical values of " + }, + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "type": "inline_equation", + "content": "(\\gamma)" + }, + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "type": "inline_equation", + "content": "(\\Xi)" + }, + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "type": "text", + "content": ": " + }, + { + "bbox": [ + 118, + 620, + 441, + 630 + ], + "type": "inline_equation", + "content": "-(\\gamma = 3) - (\\Xi = 60)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 119, + 630, + 281, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 630, + 281, + 639 + ], + "spans": [ + { + "bbox": [ + 119, + 630, + 281, + 639 + ], + "type": "text", + "content": "Thus, the value of " + }, + { + "bbox": [ + 119, + 630, + 281, + 639 + ], + "type": "inline_equation", + "content": "((\\gamma) + \\Xi)" + }, + { + "bbox": [ + 119, + 630, + 281, + 639 + ], + "type": "text", + "content": " is " + }, + { + "bbox": [ + 119, + 630, + 281, + 639 + ], + "type": "inline_equation", + "content": "(3 + 60 = 63)" + }, + { + "bbox": [ + 119, + 630, + 281, + 639 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 640, + 136, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 640, + 136, + 654 + ], + "spans": [ + { + "bbox": [ + 120, + 640, + 136, + 654 + ], + "type": "text", + "content": "63" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 681, + 504, + 704 + ], + "type": "text", + "content": "Figure 12: An example of response from s1.1 model on MiP-Formula data. The model spend lots of time doing inefficient and redundant reasoning before outputting a meaningless result." + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 167, + 83, + 442, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 83, + 442, + 95 + ], + "spans": [ + { + "bbox": [ + 167, + 83, + 442, + 95 + ], + "type": "text", + "content": "Model Response for a MiP-GSM8k Question from DeepSeek-R1" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 103, + 492, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 103, + 492, + 114 + ], + "spans": [ + { + "bbox": [ + 118, + 103, + 492, + 114 + ], + "type": "text", + "content": "Question: Kyle bought last year's best-selling book for $19.50. What was the original price of the book?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 119, + 114, + 187, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 114, + 187, + 124 + ], + "spans": [ + { + "bbox": [ + 119, + 114, + 187, + 124 + ], + "type": "text", + "content": "Model Response:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 118, + 124, + 492, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 124, + 492, + 163 + ], + "spans": [ + { + "bbox": [ + 118, + 124, + 492, + 163 + ], + "type": "text", + "content": "Okay, let's see. Kyle bought a book for $19.50, and the question is asking for the original price. Hmm, wait a second. The problem mentions that it's last year's best-selling book, so maybe it was on sale or discounted? 
The question doesn't explicitly state that there was a discount, but it does say \"last year's,\" which might imply that the book is now being sold at a reduced price." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 118, + 163, + 492, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 163, + 492, + 203 + ], + "spans": [ + { + "bbox": [ + 118, + 163, + 492, + 203 + ], + "type": "text", + "content": "But hold on, the problem doesn't give any specific information about a discount percentage or a markdown. Without that, how can we calculate the original price? Maybe there's some context or common knowledge I'm missing here. For example, sometimes best-selling books are sold at a standard discount, but that's not a universal rule." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 118, + 203, + 492, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 203, + 492, + 243 + ], + "spans": [ + { + "bbox": [ + 118, + 203, + 492, + 243 + ], + "type": "text", + "content": " Alternatively, maybe the question is testing understanding of something else, like sales tax or profit margins. But if it were sales tax, the original price would be less than $19.50, and the total after tax would be more. But the question is asking for the original price, implying that $19.50 is the price after some change." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 118, + 243, + 492, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 243, + 492, + 283 + ], + "spans": [ + { + "bbox": [ + 118, + 243, + 492, + 283 + ], + "type": "text", + "content": "Wait, perhaps the problem is incomplete? The user just says \"Kyle bought last year's best-selling book for $19.50. What was the original price of the book?\" If there's no additional information given, like a percentage discount or a mention that the price was reduced, then it's impossible to determine the original price." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 118, + 283, + 492, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 283, + 492, + 313 + ], + "spans": [ + { + "bbox": [ + 118, + 283, + 492, + 313 + ], + "type": "text", + "content": "But maybe I need to make an assumption here. Since it's last year's best-selling book, it's common for such books to be discounted when a new edition comes out or after some time. Let me think, maybe the standard discount is " + }, + { + "bbox": [ + 118, + 283, + 492, + 313 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 118, + 283, + 492, + 313 + ], + "type": "text", + "content": " or something? But the problem doesn't specify." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 118, + 313, + 492, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 313, + 492, + 343 + ], + "spans": [ + { + "bbox": [ + 118, + 313, + 492, + 343 + ], + "type": "text", + "content": "Wait, perhaps the problem was part of a larger set where the discount rate was mentioned earlier, and this is a follow-up question. But as it stands, with the information given here, there's no way to calculate the original price." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 118, + 343, + 492, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 343, + 492, + 373 + ], + "spans": [ + { + "bbox": [ + 118, + 343, + 492, + 373 + ], + "type": "text", + "content": "Alternatively, maybe the question is a trick question, where the original price is the same as what Kyle paid. But that doesn't make sense because it specifies \"last year's best-selling book,\" which might imply it was sold at a different price before." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 118, + 373, + 492, + 402 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 373, + 492, + 402 + ], + "spans": [ + { + "bbox": [ + 118, + 373, + 492, + 402 + ], + "type": "text", + "content": " Let me check the problem statement again: \"Kyle bought last year's best-selling book for $19.50. What was the original price of the book?\" There's no mention of a discount, a sale, or any other financial context." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 403, + 492, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 403, + 492, + 443 + ], + "spans": [ + { + "bbox": [ + 118, + 403, + 492, + 443 + ], + "type": "text", + "content": " If I were to approach this mathematically, let's denote the original price as P. If there was a discount, then $19.50 is equal to P minus some discount. But without knowing the discount rate, we can't set up an equation. For example, if the book was discounted by 20%, then the original price would be $19.50 / 0.80. But since we don't have that percentage, we can't proceed." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 257, + 443, + 507, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 443, + 507, + 453 + ], + "spans": [ + { + "bbox": [ + 257, + 443, + 507, + 453 + ], + "type": "text", + "content": ".24 more paragraphs here" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 118, + 453, + 492, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 453, + 492, + 472 + ], + "spans": [ + { + "bbox": [ + 118, + 453, + 492, + 472 + ], + "type": "text", + "content": " Alternatively, if the answer is expected to be $19.50, but that doesn't make sense because the question is asking for the original price, implying it was different." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 118, + 472, + 492, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 472, + 492, + 502 + ], + "spans": [ + { + "bbox": [ + 118, + 472, + 492, + 502 + ], + "type": "text", + "content": "Alternatively, maybe the problem is in another language where \"original price\" refers to the price without tax, but in the US, sales tax isn't included in the price, so if \\(19.50 is including tax, the original pre-tax price would be less. But again, without tax rate, can't compute." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 118, + 502, + 492, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 502, + 492, + 522 + ], + "spans": [ + { + "bbox": [ + 118, + 502, + 492, + 522 + ], + "type": "text", + "content": "Alternatively, if the problem is from a country where tax is included, but the question is asking for the pre-tax price, but again, no tax rate given." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 118, + 522, + 492, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 522, + 492, + 552 + ], + "spans": [ + { + "bbox": [ + 118, + 522, + 492, + 552 + ], + "type": "text", + "content": "This is really confusing. Since the problem is presented in English and mentions dollars, likely US context. In the US, sales tax is added at checkout, so the price Kyle paid would be the shelf price plus tax. If the question is asking for the pre-tax price, but again, no tax rate given." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 118, + 552, + 492, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 552, + 492, + 572 + ], + "spans": [ + { + "bbox": [ + 118, + 552, + 492, + 572 + ], + "type": "text", + "content": "Alternatively, maybe the book was on sale, and the original price is the same as the sale price, but that contradicts the wording." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 118, + 572, + 492, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 572, + 492, + 612 + ], + "spans": [ + { + "bbox": [ + 118, + 572, + 492, + 612 + ], + "type": "text", + "content": " Given all this, I think the most plausible answer, even though it's based on an assumption, is $26, assuming a 25% discount. Therefore, the original price was $26. To determine the original price of the book, we need to consider that Kyle likely purchased it at a discounted price since it's \"last year's best-selling book.\" A common assumption for such scenarios is a **25% discount**." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 119, + 612, + 310, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 612, + 310, + 622 + ], + "spans": [ + { + "bbox": [ + 119, + 612, + 310, + 622 + ], + "type": "text", + "content": "Let " + }, + { + "bbox": [ + 119, + 612, + 310, + 622 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 119, + 612, + 310, + 622 + ], + "type": "text", + "content": " be the original price. 
If the discounted price is" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 119, + 622, + 381, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 622, + 381, + 632 + ], + "spans": [ + { + "bbox": [ + 119, + 622, + 381, + 632 + ], + "type": "text", + "content": "$19.50, this represents 75% of the original price (100% - 25% discount):" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 120, + 632, + 181, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 632, + 181, + 642 + ], + "spans": [ + { + "bbox": [ + 120, + 632, + 181, + 642 + ], + "type": "inline_equation", + "content": "[0.75P = 19.50]" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 120, + 642, + 172, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 642, + 172, + 651 + ], + "spans": [ + { + "bbox": [ + 120, + 642, + 172, + 651 + ], + "type": "text", + "content": "Solving for " + }, + { + "bbox": [ + 120, + 642, + 172, + 651 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 120, + 642, + 172, + 651 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 120, + 651, + 184, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 651, + 184, + 664 + ], + "spans": [ + { + "bbox": [ + 120, + 651, + 184, + 664 + ], + "type": "text", + "content": "[ P = \\frac{19.50}{0.75} = 26 ]" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 120, + 664, + 229, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 664, + 229, + 673 + ], + "spans": [ + { + "bbox": [ + 120, + 664, + 229, + 673 + ], + "type": "text", + "content": "*Original price of the book:**" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 120, + 673, + 136, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 673, + 136, + 687 + ], + "spans": [ + { + "bbox": [ + 120, + 673, + 136, + 687 + ], + "type": 
"text", + "content": "26" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 715, + 504, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 715, + 504, + 737 + ], + "spans": [ + { + "bbox": [ + 104, + 715, + 504, + 737 + ], + "type": "text", + "content": "Figure 13: An example of model response from DeepSeek-R1 on MiP-GSM8k dataset. After thinking for a long time, the model hallucinates an answer based on its assumption of discount rate." + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 202, + 37 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_content_list.json b/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..e809ad2c16b68e38edbc371d1f56612a942de473 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_content_list.json @@ -0,0 +1,1180 @@ +[ + { + "type": "text", + "text": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents", + "text_level": 1, + "bbox": [ + 153, + 101, + 843, + 151 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lingzhe Zhang† \nPeking University \nBeijing, China 
\nzhang.lingzhe@stu.pku.edu.cn", + "bbox": [ + 127, + 162, + 334, + 224 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yunpeng Zhai† \nAlibaba Group \nBeijing, China \nzhaiyunpeng.zyp@alibaba-inc.com", + "bbox": [ + 380, + 162, + 617, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tong Jia* \nPeking University \nBeijing, China \njia.tong@oku.edu.cn", + "bbox": [ + 694, + 164, + 836, + 226 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xiaosong Huang \nPeking University \nBeijing, China \nhxs@stu.pku.edu.cn", + "bbox": [ + 161, + 239, + 299, + 300 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chiming Duan \nPeking University \nBeijing, China \nduanchiming@stu.pku.edu.cn", + "bbox": [ + 397, + 238, + 599, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Ying Li* \nPeking University \nBeijing, China \nli.ying@pku.edu.cn", + "bbox": [ + 700, + 238, + 833, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 83, + 309, + 183, + 321 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Distributed databases are critical infrastructures for today's large-scale software systems, making effective failure management essential to ensure software availability. However, existing approaches often overlook the role distinctions within distributed databases and rely on small-scale models with limited generalization capabilities. In this paper, we conduct a preliminary empirical study to emphasize the unique significance of different roles. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. AgentFM addresses failure management by considering system roles, data roles, and task roles, with a meta-agent orchestrating these components. 
Preliminary evaluations using Apache IoTDB demonstrate the effectiveness of AgentFM and open new directions for further research.", + "bbox": [ + 81, + 329, + 483, + 521 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "CCS CONCEPTS", + "text_level": 1, + "bbox": [ + 83, + 536, + 220, + 550 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "- Software and its engineering $\\rightarrow$ Maintaining software.", + "bbox": [ + 83, + 555, + 455, + 569 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "KEYWORDS", + "text_level": 1, + "bbox": [ + 83, + 583, + 189, + 597 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Failure Management, Distributed Databases, Multi Agents", + "bbox": [ + 83, + 602, + 434, + 616 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM Reference Format:", + "text_level": 1, + "bbox": [ + 83, + 625, + 230, + 637 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Lingzhe Zhang†, Yunpeng Zhai†, Tong Jia*, Xiaosong Huang, Chiming Duan, and Ying Li*. 2025. AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents. In 33rd ACM International Conference on the Foundations of Software Engineering (FSE '25), June 23–28, 2025, Trondheim, Norway. ACM, New York, NY, USA, 5 pages. https://doi.org/10.1145/3696630.3728492", + "bbox": [ + 81, + 638, + 482, + 714 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$\\dagger$ Equal contribution.", + "*Corresponding author." + ], + "bbox": [ + 83, + 734, + 197, + 758 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. 
Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org.", + "bbox": [ + 81, + 780, + 482, + 852 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "FSE '25, June 23-28, 2025, Trondheim, Norway", + "bbox": [ + 83, + 852, + 299, + 863 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM.", + "bbox": [ + 83, + 864, + 472, + 875 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ACM ISBN 979-8-4007-1276-0/25/06...$15.00", + "bbox": [ + 83, + 875, + 294, + 883 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://doi.org/10.1145/3696630.3728492", + "bbox": [ + 83, + 883, + 272, + 895 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 INTRODUCTION", + "text_level": 1, + "bbox": [ + 514, + 309, + 687, + 323 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The distributed databases, such as Google Spanner [2], Alibaba OceanBase [16], TiDB [5], and Apache IoTDB [14], have become integral components of cloud infrastructures, handling vast volumes of data [7, 25].", + "bbox": [ + 513, + 328, + 913, + 383 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, these systems frequently encounter anomalies such as system failures and performance degradation, leading to significant financial losses. For example, Alibaba Cloud faces Intermittent Slow Queries (iSQs) [11], leading to billions of dollars in annual losses. Amazon reports that even a 0.1-second delay in loading caused by database anomalies can lead to a $1\\%$ increase in financial losses [19]. 
Therefore, it is crucial to detect system failures in real time, analyze the root causes of these failures, and automatically remediate them.", + "bbox": [ + 513, + 383, + 913, + 494 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "System traces, metrics, and logs capture the states and critical events of active processes, making them essential for managing software failures. These data sources provide insights into both normal operations and deviations signaling potential failures. Leveraging their multimodal nature, recent research has enhanced anomaly detection and diagnosis in complex systems [6, 8-10, 17, 20-24, 26, 29, 30]. For example, Eadro [8] integrates anomaly detection and root cause localization using multi-source data, while AnoFusion [26] employs unsupervised multimodal failure detection in microservices. MSTGAD [6] combines all three data types with attentive multimodal learning for graph-based anomaly detection. Studies also explore two-source combinations, such as DeepTraLog [17], which pairs traces with logs using a GGNN-based model, and SCWarn [29], which identifies anomalies via heterogeneous metrics and logs. Although the effectiveness of these methods has been demonstrated in specific scenarios, they face several practical challenges when applied to distributed databases:", + "bbox": [ + 511, + 494, + 913, + 729 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Role Awareness. In distributed databases, nodes play different roles, each with varying levels of importance. Effective failure management often requires coordination and collaboration among multiple nodes. However, current methods completely overlook these roles, leading to suboptimal fault detection, inaccurate root cause diagnosis, and an inability to provide critical repair strategies.", + "- Application Limitations. Existing failure management models lack generalizability. 
For instance, these models are typically trained on specific systems and encounter concept drift when applied to new systems. Moreover, since these" + ], + "bbox": [ + 540, + 742, + 913, + 896 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06614v1 [cs.SE] 9 Apr 2025", + "bbox": [ + 22, + 275, + 58, + 700 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "methods often frame the task as a classification problem, the resulting outputs lack interpretability, which is crucial for Operations and Control Engineers (OCEs) to effectively resolve issues.", + "bbox": [ + 124, + 107, + 480, + 161 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address the first challenge, we conduct a detailed analysis of the various roles within a distributed database and the entire failure management process. We ultimately identify three distinct role categories: system roles, data roles, and task roles. System roles represent the various roles inherent in the distributed database itself (e.g., leader nodes, follower nodes). Data roles refer to the different data sources involved in failure management, while task roles represent the various tasks that need to be executed during failure management.", + "bbox": [ + 81, + 166, + 480, + 290 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To tackle the second challenge, we adopt a large language model (LLM)-based approach. While many existing LLM-based failure management solutions have been proposed, a significant number of them do not utilize the three types of data sources mentioned above [1, 3, 12, 18, 28]. Moreover, some approaches that do incorporate these data sources fail to account for the role-based structure within distributed databases [4, 13, 27]. 
Therefore, we propose a role-aware, LLM-driven multi-agent approach that integrates the characteristics of these roles and the three data sources.", + "bbox": [ + 81, + 291, + 480, + 414 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Building on these insights, we introduce AgentFM, a comprehensive role-aware failure management framework for distributed databases, powered by LLM-driven multi-agent systems. AgentFM integrates the unique characteristics of distributed database roles with the rich multimodal data sources typically encountered in failure management, such as system traces, metrics, and logs. By employing a multi-agent architecture, AgentFM facilitates specialized agents for each role—system roles, data roles, and task roles—ensuring a more nuanced and effective approach to failure detection, diagnosis, and resolution.", + "bbox": [ + 81, + 416, + 480, + 553 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We conduct preliminary experiments on Apache IoTDB [14], a distributed time-series database system, to assess the effectiveness of AgentFM in failure detection and root cause analysis. Furthermore, we manually verified the accuracy and validity of the generated mitigation solution.", + "bbox": [ + 81, + 554, + 480, + 622 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Contributions. The contributions of this paper are threefold. First, we conduct a preliminary empirical study highlighting the varying significance of different roles in failure management. Second, we propose AgentFM, a role-aware failure management framework for distributed databases with LLM-driven multi-agents. 
Third, we provide a preliminary evaluation of AgentFM on Apache IoTDB, demonstrating its feasibility and effectiveness.", + "bbox": [ + 81, + 623, + 480, + 719 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 PRELIMINARY EMPIRCAL STUDY", + "text_level": 1, + "bbox": [ + 83, + 734, + 398, + 748 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we conduct a preliminary empirical study using Apache IoTDB [14], focusing on the significance of different roles in the failure management process.", + "bbox": [ + 81, + 752, + 480, + 794 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 System Roles", + "text_level": 1, + "bbox": [ + 83, + 808, + 235, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We manually injected anomalies (excessive data export) into Apache IoTDB during runtime and evaluate the anomaly detection performance (precision, recall and f1-score) for each node using PLELog [15], which is a state-of-the-art log-based anomaly detection method.", + "bbox": [ + 81, + 825, + 480, + 895 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg", + "table_caption": [ + "Table 1: Anomaly Detection Results for Each Node" + ], + "table_footnote": [], + "table_body": "
Node1Node2Node3Node4Node5Node6
p39.68%34.35%59.64%31.17%77.60%89.42%
r99.01%100.00%98.02%100.00%96.04%92.08%
f156.66%51.14%74.16%47.53%85.84%90.73%
", + "bbox": [ + 522, + 131, + 903, + 202 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As shown in Table 1, $\\text{Node}_6$ achieves the best anomaly detection performance due to hosting the largest number of leader partitions. In contrast, the detection performance on other nodes is relatively suboptimal. This observation underscores the differing significance of various system roles.", + "bbox": [ + 511, + 220, + 911, + 290 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Data Roles", + "text_level": 1, + "bbox": [ + 514, + 301, + 645, + 315 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We further conduct anomaly diagnosis classification experiments on Apache IoTDB using both metrics data and log data. To ensure fairness, we do not adopt state-of-the-art methods; instead, we implement a simple classification algorithm based on the Dynamic Time Warping (DTW) algorithm.", + "bbox": [ + 511, + 320, + 911, + 388 + ], + "page_idx": 1 + }, + { + "type": "table", + "img_path": "images/4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg", + "table_caption": [ + "Table 2: Anomalies that can be Classified by Metrics and Logs" + ], + "table_footnote": [], + "table_body": "
SourceCPUMemoryExportImportConfiguration
Metrics
Logs
", + "bbox": [ + 517, + 430, + 911, + 491 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "As shown in Table 2, anomalies with an F1-score above $50\\%$ are considered identifiable by the corresponding data type. The results reveal that metrics are particularly effective at detecting resource anomalies, such as CPU and memory saturation, while logs excel at identifying internal database issues, including excessive data export/import and configuration errors. This observation highlights the distinct significance of different data roles.", + "bbox": [ + 511, + 506, + 911, + 602 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3 METHODOLOGY", + "text_level": 1, + "bbox": [ + 514, + 614, + 686, + 628 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our preliminary empirical study highlights that the effectiveness of failure management varies significantly across different roles in distributed databases. Consequently, it becomes crucial to assign varying levels of importance to different roles at different times and adopt role-specific operations to optimize outcomes.", + "bbox": [ + 511, + 632, + 911, + 700 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we introduce AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Figure 1 illustrates the architecture of AgentFM, which comprises three types of agents corresponding to distinct roles: system agents, data agents, and task agents. System agents represent the various roles of nodes within the distributed database, data agents handle information from diverse data sources, and task agents focus on executing specific failure management tasks. Notably, since system agents and data agents operate independently on each node, they are collectively referred to as standalone agents. 
Additionally, the framework incorporates a meta-agent, which is responsible for orchestrating, adapting, aggregating, and ultimately producing the final results of the agents' operations, ensuring cohesive and efficient failure management.", + "bbox": [ + 511, + 702, + 913, + 896 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "FSE '25, June 23-28, 2025, Trondheim, Norway", + "bbox": [ + 84, + 75, + 303, + 85 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Lingzhe Zhang et al.", + "bbox": [ + 812, + 75, + 911, + 85 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg", + "image_caption": [ + "Figure 1: AgentFM Architecture" + ], + "image_footnote": [], + "bbox": [ + 84, + 104, + 911, + 402 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The framework operates by first extracting critical information from different system agents using the corresponding data agents. This information is then transmitted to a centralized meta-agent for unified decision-making. The decision-making process employs trace data as a cohesive framework to integrate key insights across nodes. Finally, the appropriate task agents execute the required failure management operations, completing the process efficiently and effectively.", + "bbox": [ + 81, + 450, + 482, + 561 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 System Agents", + "text_level": 1, + "bbox": [ + 83, + 578, + 246, + 594 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Different distributed databases assign dynamic system roles that may change during runtime (e.g., leader re-election after node failure). 
To handle this, we designed an adaptive mechanism for system agents within the Meta Agent.", + "bbox": [ + 81, + 595, + 482, + 651 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As illustrated in Figure 2, the core component is the System Role Manager, which initializes by analyzing configuration files and system docs to identify each node's role and importance. At runtime, it periodically queries the system state and updates roles accordingly.", + "bbox": [ + 81, + 652, + 482, + 720 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Based on this process, each node instantiates a corresponding system agent—such as a Config Agent, Coordinator Agent, or Storage Agent—each embedding multiple data agents to handle specialized tasks.", + "bbox": [ + 81, + 720, + 482, + 775 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Data Agents", + "text_level": 1, + "bbox": [ + 83, + 794, + 227, + 809 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this paper, we define two types of data agents: the Metric Agent and the Log Agent. Each adopts a distinct approach to extract key information from the raw metrics data and log data, respectively.", + "bbox": [ + 81, + 811, + 480, + 854 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Metric Agent: The overall workflow of the Metric Agent begins with simple data preprocessing, which includes noise removal and imputation of missing values. After preprocessing, the multivariate", + "bbox": [ + 81, + 854, + 480, + 896 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg", + "image_caption": [ + "Figure 2: System Agents Adaptation Workflow" + ], + "image_footnote": [], + "bbox": [ + 516, + 444, + 915, + 664 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "time-series data is converted into natural language descriptions using a large model. 
The generated natural language descriptions capture key aspects of the data, including time intervals, fluctuation trends, and anomaly points.", + "bbox": [ + 511, + 710, + 913, + 767 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Formally, let $\\mathbf{M} = \\{m_1, m_2, \\dots, m_n\\}$ represent the raw multidimensional metrics data, where each $m_i$ corresponds to a time-series for a specific metric. The preprocessing step can be represented as Equation 1, where $\\mathbf{M}_{\\mathrm{p}}$ is the denoised and imputed version of $M$ .", + "bbox": [ + 511, + 767, + 913, + 835 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {M} _ {\\mathrm {p}} = \\text {P r e p r o c e s s} (\\mathbf {M}) = \\left\\{m _ {1} ^ {\\prime}, m _ {2} ^ {\\prime}, \\dots , m _ {n} ^ {\\prime} \\right\\} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 591, + 849, + 911, + 866 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Next, the processed data is converted into natural language descriptions using a large language model $\\mathcal{L}$ , as shown in Equation 2,", + "bbox": [ + 513, + 867, + 913, + 896 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents", + "bbox": [ + 83, + 75, + 565, + 87 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "FSE '25, June 23-28, 2025, Trondheim, Norway", + "bbox": [ + 691, + 75, + 913, + 87 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\mathbf{D}_{\\mathrm{nl}}$ represents the resulting natural language description, which includes information on time intervals, trends, and identified anomalies in the data.", + "bbox": [ + 81, + 106, + 480, + 148 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {D} _ {\\mathrm {n l}} = \\mathcal {L} \\left(\\mathbf {M} _ {\\mathrm {p}}\\right) \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 
236, + 162, + 480, + 176 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Log Agent: Unlike metrics data, log data is written by developers to monitor the internal state of the system, inherently containing rich semantic information. However, log data often includes substantial redundant information, such as repetitive log entries and messages unrelated to system anomalies.", + "bbox": [ + 81, + 179, + 480, + 247 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To address this, the Log Agent comprises two main components: sequence compression and semantic compression. Sequence compression focuses on reducing the raw log sequence by consolidating repetitive log patterns, while semantic compression extracts key operational information from the logs.", + "bbox": [ + 81, + 247, + 480, + 316 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Sequence Compression: This component utilizes a log-parsing algorithm to transform each log entry into a distinct event template consisting of a static constant part and variable parameters. It then merges consecutive occurrences of identical event templates. Formally, for a given raw log sequence $L = \\{l_1, l_2, \\dots, l_N\\}$ , where $l_n$ represents an individual log entry, the parsing process transforms $L$ into $L_e = \\{e_1, e_2, \\dots, e_N\\}$ , where $e_i$ represents an event template. Consecutive identical events are merged, yielding $G' = \\{e_1', e_2', \\dots, e_C'\\}$ , where $C \\ll N$ and $e_i' = e_i \\times c$ represents the event $e_i$ with a count $c$ .", + "bbox": [ + 81, + 316, + 480, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Semantic Compression: This component employs a prompt-based method to convert lengthy log sequences into descriptions of the database's ongoing operations. 
Formally, given a sequence of raw log entries $L = \\{l_1, l_2, \\dots, l_N\\}$ , the LLM-based summarization transforms and compresses $L$ into a smaller set of key operational elements $O = \\{o_1, o_2, \\dots, o_M\\}$ , where $M \\ll N$ .", + "bbox": [ + 81, + 455, + 483, + 539 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Task Agents", + "text_level": 1, + "bbox": [ + 83, + 550, + 228, + 565 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After system agents collect necessary data via data agents, the meta agent coordinates task agents to carry out failure management using trace data. This process involves three agent types—detection, diagnosis, and mitigation—which operate sequentially. Detection agents identify anomalies in each time window; upon detection, diagnosis agents locate and classify the issue; finally, mitigation agents propose solutions based on the diagnosis.", + "bbox": [ + 81, + 568, + 480, + 665 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Though prompts vary by task, all agents follow a common RAG+CoT approach, using historical data as labeled examples—normal/abnormal for detection, failure types for diagnosis—to guide reasoning during querying.", + "bbox": [ + 81, + 666, + 483, + 722 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4 PRELIMINARY EVALUATION", + "text_level": 1, + "bbox": [ + 83, + 732, + 357, + 747 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1 Design", + "text_level": 1, + "bbox": [ + 83, + 752, + 184, + 767 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To evaluate AgentFM, we assess its feasibility and effectiveness in Apache IoTDB. We manually injected 10 types of anomalies, including CPU saturation, IO saturation, memory saturation, network delay increase, network bandwidth limitation, network partition occurrence, workload spikes, accompanying slow queries, excessive data export, and excessive data import. 
Each anomaly type is injected 20 times.", + "bbox": [ + 81, + 770, + 480, + 866 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The evaluation is conducted based on Qwen2.5-72b to assess the results of anomaly detection and diagnosis. The performance is", + "bbox": [ + 81, + 867, + 480, + 896 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "measured using precision, recall, and F1-score metrics. Additionally, the mitigation solutions generated are manually analyzed to determine their usefulness.", + "bbox": [ + 513, + 106, + 915, + 148 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2 Results", + "text_level": 1, + "bbox": [ + 514, + 161, + 620, + 175 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We utilize the function call feature to guide the model in generating \"yes\" or \"no\" responses to evaluate anomaly detection performance. For failure diagnosis, we manually review the LLM-generated results to identify the classification most closely matching the pre-injected anomalies and assessed the outcomes accordingly.", + "bbox": [ + 513, + 179, + 915, + 250 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg", + "table_caption": [ + "Table 3: Anomaly Detection & Diagnosis Results" + ], + "table_footnote": [], + "table_body": "
TaskPrecisionRecallF1-Score
Anomaly Detection95.14%97.03%95.76%
Failure Diagnosis89.61%87.04%87.62%
", + "bbox": [ + 547, + 292, + 880, + 349 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Table 3, AgentFM demonstrates promising performance in our preliminary experiments. However, based on our observations, the model struggles to identify correct results when dealing with complex internal anomalies (e.g., excessive data import). We plan to focus on optimizing performance for such scenarios in future work.", + "bbox": [ + 513, + 369, + 915, + 453 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Increase CPU Resources:", + "text_level": 1, + "bbox": [ + 522, + 479, + 692, + 491 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Iotdb0 frequently experiences high CPU usage, consider increasing the number of CPU cores allocated to the container or raising the CPU limit.", + "bbox": [ + 522, + 493, + 905, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Load Balancing:", + "text_level": 1, + "bbox": [ + 522, + 535, + 635, + 547 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "If iotdb0 is part of a cluster, consider distributing some of the load to other nodes to achieve load balancing.", + "bbox": [ + 522, + 549, + 903, + 575 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 3: Sample Mitigation Solutions from AgentFM", + "bbox": [ + 535, + 603, + 890, + 618 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We also conduct a manual analysis of the mitigation solutions recommended by AgentFM. One example is illustrated in Figure 3. 
We extract the key information from the solution and find that these recommendations provide targeted and effective assistance in system remediation.", + "bbox": [ + 513, + 638, + 915, + 709 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "5 CONCLUSION", + "text_level": 1, + "bbox": [ + 514, + 720, + 663, + 734 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This paper investigates the varying significance of different roles in failure management. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Our preliminary experiments demonstrate the feasibility of AgentFM. In the future, we plan to further enhance AgentFM's capabilities in automating remediation execution.", + "bbox": [ + 513, + 739, + 915, + 835 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "ACKNOWLEDGEMENT", + "text_level": 1, + "bbox": [ + 514, + 849, + 714, + 862 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This work is supported by Key-Area Research and Development Program of Guangdong Province, China (NO.2020B010164003).", + "bbox": [ + 513, + 867, + 913, + 896 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "FSE '25, June 23-28, 2025, Trondheim, Norway", + "bbox": [ + 83, + 75, + 307, + 87 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Lingzhe Zhang et al.", + "bbox": [ + 812, + 75, + 911, + 87 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "REFERENCES", + "text_level": 1, + "bbox": [ + 84, + 104, + 202, + 119 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Toufique Ahmed, Supriyo Ghosh, Chetan Bansal, Thomas Zimmermann, Xuchao Zhang, and Saravan Rajmohan. 2023. Recommending root-cause and mitigation steps for cloud incidents using large language models. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). 
IEEE, 1737-1749.", + "[2] James C Corbett, Jeffrey Dean, Michael Epstein, Andrew Fikes, Christopher Frost, Jeffrey John Furman, Sanjay Ghemawat, Andrey Gubarev, Christopher Heiser, Peter Hochschild, et al. 2013. Spanner: Google's globally distributed database. ACM Transactions on Computer Systems (TOCS) 31, 3 (2013), 1-22.", + "[3] Drishti Goel, Fiza Husain, Aditya Singh, Supriyo Ghosh, Anjaly Parayil, Chetan Bansal, Xuchao Zhang, and Saravan Rajmohan. 2024. X-lifecycle learning for cloud incident management using llms. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 417-428.", + "[4] Adha Hrusto, Per Runeson, and Magnus C Ohlsson. 2024. Autonomous monitors for detecting failures early and reporting interpretable alerts in cloud operations. In Proceedings of the 46th International Conference on Software Engineering: Software Engineering in Practice. 47-57.", + "[5] Dongxu Huang, Qi Liu, Qiu Cui, Zhuhe Fang, Xiaoyu Ma, Fei Xu, Li Shen, Liu Tang, Yuxing Zhou, Menglong Huang, et al. 2020. TiDB: a Raft-based HTAP database. Proceedings of the VLDB Endowment 13, 12 (2020), 3072-3084.", + "[6] Jun Huang, Yang Yang, Hang Yu, Jianguo Li, and Xiao Zheng. 2023. Twin graph-based anomaly detection via attentive multi-modal learning for microservice system. In 2023 38th IEEE/ACM International Conference on Automated Software Engineering (ASE). IEEE, 66-78.", + "[7] Yuyuan Kang, Xiangdong Huang, Shaoxu Song, Lingzhe Zhang, Jialin Qiao, Chen Wang, Jianmin Wang, and Julian Feinauer. 2022. Separation or not: On handing out-of-order time-series data in leveled LSM-tree. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 3340-3352.", + "[8] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, and Michael R Lyu. 2023. Eadro: An end-to-end troubleshooting framework for microservices on multi-source data. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). 
IEEE, 1750-1762.", + "[9] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, Yongqiang Yang, and Michael R Lyu. 2023. Heterogeneous anomaly detection for software systems via semi-supervised cross-modal attention. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1724-1736.", + "[10] Cheng-Ming Lin, Ching Chang, Wei-Yao Wang, Kuang-Da Wang, and Wen-Chih Peng. 2024. Root Cause Analysis in Microservice Using Neural Granger Causal Discovery. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 206-213.", + "[11] Minghua Ma, Zheng Yin, Shenglin Zhang, Sheng Wang, Christopher Zheng, Xinhao Jiang, Hanwen Hu, Cheng Luo, Yilin Li, Nengjun Qiu, et al. 2020. Diagnosing root causes of intermittent slow queries in cloud databases. Proceedings of the VLDB Endowment 13, 8 (2020), 1176-1189.", + "[12] Devjeet Roy, Xuchao Zhang, Rashi Bhave, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. Exploring LLM-based agents for root cause analysis. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering.* 208-219.", + "[13] Manish Shetty, Yinfang Chen, Gagan Somashekar, Minghua Ma, Yogesh Simmhan, Xuchao Zhang, Jonathan Mace, Dax Vandevoorde, Pedro Las-Casas, Shachee Mishra Gupta, et al. 2024. Building AI Agents for Autonomous Clouds: Challenges and Design Principles. In Proceedings of the 2024 ACM Symposium on Cloud Computing, 99-110.", + "[14] Chen Wang, Xiangdong Huang, Jialin Qiao, Tian Jiang, Lei Rui, Jinrui Zhang, Rong Kang, Julian Feinauer, Kevin A McGrail, Peng Wang, et al. 2020. Apache iotdb: time-series database for internet of things. Proceedings of the VLDB Endowment 13, 12 (2020), 2901-2904.", + "[15] Lin Yang, Junjie Chen, Zan Wang, Weijing Wang, Jiajun Jiang, Xuyuan Dong, and Wenbin Zhang. 2021. Semi-supervised log-based anomaly detection via probabilistic label estimation. 
In 2021 IEEE/ACM 43rd International Conference on" + ], + "bbox": [ + 86, + 122, + 480, + 727 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Software Engineering (ICSE). IEEE, 1448-1460.", + "[16] Zhenkun Yang, Chuanhui Yang, Fusheng Han, Mingqiang Zhuang, Bing Yang, Zhifeng Yang, Xiaojun Cheng, Yuzhong Zhao, Wenhui Shi, Huafeng Xi, et al. 2022. OceanBase: a 707 million tpmC distributed relational database system. Proceedings of the VLDB Endowment 15, 12 (2022), 3385-3397.", + "[17] Chenxi Zhang, Xin Peng, Chaofeng Sha, Ke Zhang, Zhenqing Fu, Xiya Wu, Qingwei Lin, and Dongmei Zhang. 2022. DeepTraLog: Trace-log combined microservice anomaly detection through graph-based deep learning. In Proceedings of the 44th international conference on software engineering. 623-634.", + "[18] Dylan Zhang, Xuchao Zhang, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. LM-PACE: Confidence estimation by large language models for effective root causing of cloud incidents. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering*, 388-398.", + "[19] Lingzhe Zhang, Tong Jia, Mengxi Jia, Ying Li, Yong Yang, and Zhonghai Wu. 2024. Multivariate Log-based Anomaly Detection for Distributed Database. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4256-4267.", + "[20] Lingzhe Zhang, Tong Jia, Mengxi Jia, Hongyi Liu, Yong Yang, Zhonghai Wu, and Ying Li. 2024. Towards Close-To-Zero Runtime Collection Overhead: Raft-Based Anomaly Diagnosis on System Faults for Distributed Storage System. IEEE Transactions on Services Computing (2024).", + "[21] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Aiwei Liu, Yong Yang, Zhonghai Wu, Xuming Hu, Philip S Yu, and Ying Li. 2024. A Survey of AIOps for Failure Management in the Era of Large Language Models. 
arXiv preprint arXiv:2406.11213 (2024).", + "[22] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. ScalaLog: Scalable Log-Based Failure Diagnosis Using LLM. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5.", + "[23] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. XRAGLog: A Resource-Efficient and Context-Aware Log-Based Anomaly Detection Method Using Retrieval-Augmented Generation. In AAAI 2025 Workshop on Preventing and Detecting LLM Misinformation (PDLM).", + "[24] Lingzhe Zhang, Tong Jia, Kangjin Wang, Mengxi Jia, Yong Yang, and Ying Li. 2024. Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study. In Proceedings of the 18th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement. 538-548.", + "[25] Ling-Zhe Zhang, Xiang-Dong Huang, Yan-Kai Wang, Jia-Lin Qiao, Shao-Xu Song, and Jian-Min Wang. 2024. Time-tired compaction: An elastic compaction scheme for LSM-tree based time-series database. Advanced Engineering Informatics 59 (2024), 102224.", + "[26] Shenglin Zhang, Pengxiang Jin, Zihan Lin, Yongqian Sun, Bicheng Zhang, Sibo Xia, Zhengdan Li, Zhenyu Zhong, Minghua Ma, Wa Jin, et al. 2023. Robust failure diagnosis of microservice system through multimodal data. IEEE Transactions on Services Computing 16, 6 (2023), 3851-3864.", + "[27] Wei Zhang, Hongcheng Guo, Jian Yang, Yi Zhang, Chaoran Yan, Zhoujin Tian, Hangyuan Ji, Zhoujun Li, Tongliang Li, Tieqiao Zheng, et al. 2024. mABC: multi-Agent Blockchain-Inspired Collaboration for root cause analysis in micro-services architecture. arXiv preprint arXiv:2404.12135 (2024).", + "[28] Xuchao Zhang, Supriyo Ghosh, Chetan Bansal, Rujia Wang, Minghua Ma, Yu Kang, and Saravan Rajmohan. 2024. Automated root causing of cloud incidents using in-context learning with gpt-4. 
In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 266-277.", + "[29] Nengwen Zhao, Junjie Chen, Zhaoyang Yu, Honglin Wang, Jiesong Li, Bin Qiu, Hongyu Xu, Wenchi Zhang, Kaixin Sui, and Dan Pei. 2021. Identifying bad software changes via multimodal anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 527-539.", + "[30] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. Multimodal Causal Structure Learning and Root Cause Analysis. arXiv preprint arXiv:2402.02357 (2024)." + ], + "bbox": [ + 516, + 109, + 913, + 731 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents", + "bbox": [ + 83, + 75, + 563, + 87 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "FSE '25, June 23-28, 2025, Trondheim, Norway", + "bbox": [ + 692, + 75, + 911, + 87 + ], + "page_idx": 4 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_model.json b/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_model.json new file mode 100644 index 0000000000000000000000000000000000000000..d72c53983a49ed00e0d030c6b5febace52630daa --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_model.json @@ -0,0 +1,1541 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.154, + 0.102, + 0.844, + 0.152 + ], + "angle": 0, + "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents" + }, + { + "type": "text", + "bbox": [ + 0.129, + 0.164, + 0.335, + 0.226 + ], + "angle": 0, + "content": "Lingzhe Zhang† \nPeking University \nBeijing, China \nzhang.lingzhe@stu.pku.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.381, + 0.164, + 0.619, + 
0.227 + ], + "angle": 0, + "content": "Yunpeng Zhai† \nAlibaba Group \nBeijing, China \nzhaiyunpeng.zyp@alibaba-inc.com" + }, + { + "type": "text", + "bbox": [ + 0.696, + 0.165, + 0.838, + 0.227 + ], + "angle": 0, + "content": "Tong Jia* \nPeking University \nBeijing, China \njia.tong@pku.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.24, + 0.3, + 0.301 + ], + "angle": 0, + "content": "Xiaosong Huang \nPeking University \nBeijing, China \nhxs@stu.pku.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.398, + 0.239, + 0.6, + 0.302 + ], + "angle": 0, + "content": "Chiming Duan \nPeking University \nBeijing, China \nduanchiming@stu.pku.edu.cn" + }, + { + "type": "text", + "bbox": [ + 0.701, + 0.239, + 0.834, + 0.302 + ], + "angle": 0, + "content": "Ying Li* \nPeking University \nBeijing, China \nli.ying@pku.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.31, + 0.184, + 0.323 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.33, + 0.484, + 0.522 + ], + "angle": 0, + "content": "Distributed databases are critical infrastructures for today's large-scale software systems, making effective failure management essential to ensure software availability. However, existing approaches often overlook the role distinctions within distributed databases and rely on small-scale models with limited generalization capabilities. In this paper, we conduct a preliminary empirical study to emphasize the unique significance of different roles. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. AgentFM addresses failure management by considering system roles, data roles, and task roles, with a meta-agent orchestrating these components. Preliminary evaluations using Apache IoTDB demonstrate the effectiveness of AgentFM and open new directions for further research."
+ }, + { + "type": "title", + "bbox": [ + 0.084, + 0.537, + 0.221, + 0.551 + ], + "angle": 0, + "content": "CCS CONCEPTS" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.556, + 0.456, + 0.57 + ], + "angle": 0, + "content": "- Software and its engineering \\(\\rightarrow\\) Maintaining software." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.584, + 0.191, + 0.598 + ], + "angle": 0, + "content": "KEYWORDS" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.603, + 0.436, + 0.617 + ], + "angle": 0, + "content": "Failure Management, Distributed Databases, Multi Agents" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.626, + 0.231, + 0.638 + ], + "angle": 0, + "content": "ACM Reference Format:" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.639, + 0.483, + 0.715 + ], + "angle": 0, + "content": "Lingzhe Zhang†, Yunpeng Zhai†, Tong Jia*, Xiaosong Huang, Chiming Duan, and Ying Li*. 2025. AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents. In 33rd ACM International Conference on the Foundations of Software Engineering (FSE '25), June 23–28, 2025, Trondheim, Norway. ACM, New York, NY, USA, 5 pages. https://doi.org/10.1145/3696630.3728492" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.736, + 0.184, + 0.747 + ], + "angle": 0, + "content": "\\(\\dagger\\) Equal contribution." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.747, + 0.199, + 0.759 + ], + "angle": 0, + "content": "*Corresponding author." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.736, + 0.199, + 0.759 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.781, + 0.483, + 0.853 + ], + "angle": 0, + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. 
Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.853, + 0.3, + 0.864 + ], + "angle": 0, + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.865, + 0.473, + 0.875 + ], + "angle": 0, + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.875, + 0.295, + 0.884 + ], + "angle": 0, + "content": "ACM ISBN 979-8-4007-1276-0/25/06...$15.00" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.885, + 0.273, + 0.896 + ], + "angle": 0, + "content": "https://doi.org/10.1145/3696630.3728492" + }, + { + "type": "title", + "bbox": [ + 0.516, + 0.31, + 0.688, + 0.324 + ], + "angle": 0, + "content": "1 INTRODUCTION" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.329, + 0.915, + 0.384 + ], + "angle": 0, + "content": "The distributed databases, such as Google Spanner [2], Alibaba OceanBase [16], TiDB [5], and Apache IoTDB [14], have become integral components of cloud infrastructures, handling vast volumes of data [7, 25]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.385, + 0.915, + 0.495 + ], + "angle": 0, + "content": "However, these systems frequently encounter anomalies such as system failures and performance degradation, leading to significant financial losses. For example, Alibaba Cloud faces Intermittent Slow Queries (iSQs) [11], leading to billions of dollars in annual losses. Amazon reports that even a 0.1-second delay in loading caused by database anomalies can lead to a \\(1\\%\\) increase in financial losses [19]. 
Therefore, it is crucial to detect system failures in real time, analyze the root causes of these failures, and automatically remediate them." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.496, + 0.915, + 0.731 + ], + "angle": 0, + "content": "System traces, metrics, and logs capture the states and critical events of active processes, making them essential for managing software failures. These data sources provide insights into both normal operations and deviations signaling potential failures. Leveraging their multimodal nature, recent research has enhanced anomaly detection and diagnosis in complex systems [6, 8-10, 17, 20-24, 26, 29, 30]. For example, Eadro [8] integrates anomaly detection and root cause localization using multi-source data, while AnoFusion [26] employs unsupervised multimodal failure detection in microservices. MSTGAD [6] combines all three data types with attentive multimodal learning for graph-based anomaly detection. Studies also explore two-source combinations, such as DeepTraLog [17], which pairs traces with logs using a GGNN-based model, and SCWarn [29], which identifies anomalies via heterogeneous metrics and logs. Although the effectiveness of these methods has been demonstrated in specific scenarios, they face several practical challenges when applied to distributed databases:" + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.743, + 0.914, + 0.841 + ], + "angle": 0, + "content": "- Role Awareness. In distributed databases, nodes play different roles, each with varying levels of importance. Effective failure management often requires coordination and collaboration among multiple nodes. However, current methods completely overlook these roles, leading to suboptimal fault detection, inaccurate root cause diagnosis, and an inability to provide critical repair strategies." + }, + { + "type": "text", + "bbox": [ + 0.542, + 0.841, + 0.914, + 0.897 + ], + "angle": 0, + "content": "- Application Limitations. 
Existing failure management models lack generalizability. For instance, these models are typically trained on specific systems and encounter concept drift when applied to new systems. Moreover, since these" + }, + { + "type": "list", + "bbox": [ + 0.542, + 0.743, + 0.914, + 0.897 + ], + "angle": 0, + "content": null + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.276, + 0.059, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.06614v1 [cs.SE] 9 Apr 2025" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.085, + 0.076, + 0.305, + 0.087 + ], + "angle": 0, + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + }, + { + "type": "header", + "bbox": [ + 0.813, + 0.076, + 0.912, + 0.087 + ], + "angle": 0, + "content": "Lingzhe Zhang et al." + }, + { + "type": "text", + "bbox": [ + 0.125, + 0.108, + 0.481, + 0.162 + ], + "angle": 0, + "content": "methods often frame the task as a classification problem, the resulting outputs lack interpretability, which is crucial for Operations and Control Engineers (OCEs) to effectively resolve issues." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.167, + 0.482, + 0.291 + ], + "angle": 0, + "content": "To address the first challenge, we conduct a detailed analysis of the various roles within a distributed database and the entire failure management process. We ultimately identify three distinct role categories: system roles, data roles, and task roles. System roles represent the various roles inherent in the distributed database itself (e.g., leader nodes, follower nodes). Data roles refer to the different data sources involved in failure management, while task roles represent the various tasks that need to be executed during failure management." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.292, + 0.482, + 0.415 + ], + "angle": 0, + "content": "To tackle the second challenge, we adopt a large language model (LLM)-based approach. 
While many existing LLM-based failure management solutions have been proposed, a significant number of them do not utilize the three types of data sources mentioned above [1, 3, 12, 18, 28]. Moreover, some approaches that do incorporate these data sources fail to account for the role-based structure within distributed databases [4, 13, 27]. Therefore, we propose a role-aware, LLM-driven multi-agent approach that integrates the characteristics of these roles and the three data sources." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.417, + 0.482, + 0.554 + ], + "angle": 0, + "content": "Building on these insights, we introduce AgentFM, a comprehensive role-aware failure management framework for distributed databases, powered by LLM-driven multi-agent systems. AgentFM integrates the unique characteristics of distributed database roles with the rich multimodal data sources typically encountered in failure management, such as system traces, metrics, and logs. By employing a multi-agent architecture, AgentFM facilitates specialized agents for each role—system roles, data roles, and task roles—ensuring a more nuanced and effective approach to failure detection, diagnosis, and resolution." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.555, + 0.482, + 0.623 + ], + "angle": 0, + "content": "We conduct preliminary experiments on Apache IoTDB [14], a distributed time-series database system, to assess the effectiveness of AgentFM in failure detection and root cause analysis. Furthermore, we manually verified the accuracy and validity of the generated mitigation solution." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.624, + 0.482, + 0.72 + ], + "angle": 0, + "content": "Contributions. The contributions of this paper are threefold. First, we conduct a preliminary empirical study highlighting the varying significance of different roles in failure management. 
Second, we propose AgentFM, a role-aware failure management framework for distributed databases with LLM-driven multi-agents. Third, we provide a preliminary evaluation of AgentFM on Apache IoTDB, demonstrating its feasibility and effectiveness." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.735, + 0.4, + 0.749 + ], + "angle": 0, + "content": "2 PRELIMINARY EMPIRCAL STUDY" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.753, + 0.482, + 0.795 + ], + "angle": 0, + "content": "In this section, we conduct a preliminary empirical study using Apache IoTDB [14], focusing on the significance of different roles in the failure management process." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.809, + 0.236, + 0.824 + ], + "angle": 0, + "content": "2.1 System Roles" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.827, + 0.482, + 0.896 + ], + "angle": 0, + "content": "We manually injected anomalies (excessive data export) into Apache IoTDB during runtime and evaluate the anomaly detection performance (precision, recall and f1-score) for each node using PLELog [15], which is a state-of-the-art log-based anomaly detection method." + }, + { + "type": "table_caption", + "bbox": [ + 0.545, + 0.105, + 0.884, + 0.119 + ], + "angle": 0, + "content": "Table 1: Anomaly Detection Results for Each Node" + }, + { + "type": "table", + "bbox": [ + 0.524, + 0.132, + 0.905, + 0.203 + ], + "angle": 0, + "content": "
Node1Node2Node3Node4Node5Node6
p39.68%34.35%59.64%31.17%77.60%89.42%
r99.01%100.00%98.02%100.00%96.04%92.08%
f156.66%51.14%74.16%47.53%85.84%90.73%
" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.221, + 0.913, + 0.291 + ], + "angle": 0, + "content": "As shown in Table 1, \\( \\text{Node}_6 \\) achieves the best anomaly detection performance due to hosting the largest number of leader partitions. In contrast, the detection performance on other nodes is relatively suboptimal. This observation underscores the differing significance of various system roles." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.303, + 0.647, + 0.316 + ], + "angle": 0, + "content": "2.2 Data Roles" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.321, + 0.913, + 0.39 + ], + "angle": 0, + "content": "We further conduct anomaly diagnosis classification experiments on Apache IoTDB using both metrics data and log data. To ensure fairness, we do not adopt state-of-the-art methods; instead, we implement a simple classification algorithm based on the Dynamic Time Warping (DTW) algorithm." + }, + { + "type": "table_caption", + "bbox": [ + 0.515, + 0.403, + 0.913, + 0.417 + ], + "angle": 0, + "content": "Table 2: Anomalies that can be Classified by Metrics and Logs" + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.431, + 0.912, + 0.492 + ], + "angle": 0, + "content": "
SourceCPUMemoryExportImportConfiguration
Metrics
Logs
" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.507, + 0.913, + 0.603 + ], + "angle": 0, + "content": "As shown in Table 2, anomalies with an F1-score above \\(50\\%\\) are considered identifiable by the corresponding data type. The results reveal that metrics are particularly effective at detecting resource anomalies, such as CPU and memory saturation, while logs excel at identifying internal database issues, including excessive data export/import and configuration errors. This observation highlights the distinct significance of different data roles." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.615, + 0.687, + 0.629 + ], + "angle": 0, + "content": "3 METHODOLOGY" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.633, + 0.913, + 0.702 + ], + "angle": 0, + "content": "Our preliminary empirical study highlights that the effectiveness of failure management varies significantly across different roles in distributed databases. Consequently, it becomes crucial to assign varying levels of importance to different roles at different times and adopt role-specific operations to optimize outcomes." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.703, + 0.915, + 0.897 + ], + "angle": 0, + "content": "In this section, we introduce AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Figure 1 illustrates the architecture of AgentFM, which comprises three types of agents corresponding to distinct roles: system agents, data agents, and task agents. System agents represent the various roles of nodes within the distributed database, data agents handle information from diverse data sources, and task agents focus on executing specific failure management tasks. Notably, since system agents and data agents operate independently on each node, they are collectively referred to as standalone agents. 
Additionally, the framework incorporates a meta-agent, which is responsible for orchestrating, adapting, aggregating, and ultimately producing the final results of the agents' operations, ensuring cohesive and efficient failure management." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.566, + 0.088 + ], + "angle": 0, + "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents" + }, + { + "type": "header", + "bbox": [ + 0.692, + 0.076, + 0.914, + 0.088 + ], + "angle": 0, + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + }, + { + "type": "image", + "bbox": [ + 0.086, + 0.105, + 0.913, + 0.403 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.39, + 0.415, + 0.607, + 0.43 + ], + "angle": 0, + "content": "Figure 1: AgentFM Architecture" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.451, + 0.483, + 0.562 + ], + "angle": 0, + "content": "The framework operates by first extracting critical information from different system agents using the corresponding data agents. This information is then transmitted to a centralized meta-agent for unified decision-making. The decision-making process employs trace data as a cohesive framework to integrate key insights across nodes. Finally, the appropriate task agents execute the required failure management operations, completing the process efficiently and effectively." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.579, + 0.248, + 0.595 + ], + "angle": 0, + "content": "3.1 System Agents" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.597, + 0.483, + 0.652 + ], + "angle": 0, + "content": "Different distributed databases assign dynamic system roles that may change during runtime (e.g., leader re-election after node failure). To handle this, we designed an adaptive mechanism for system agents within the Meta Agent." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.653, + 0.483, + 0.722 + ], + "angle": 0, + "content": "As illustrated in Figure 2, the core component is the System Role Manager, which initializes by analyzing configuration files and system docs to identify each node's role and importance. At runtime, it periodically queries the system state and updates roles accordingly." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.722, + 0.483, + 0.776 + ], + "angle": 0, + "content": "Based on this process, each node instantiates a corresponding system agent—such as a Config Agent, Coordinator Agent, or Storage Agent—each embedding multiple data agents to handle specialized tasks." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.795, + 0.228, + 0.81 + ], + "angle": 0, + "content": "3.2 Data Agents" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.813, + 0.482, + 0.855 + ], + "angle": 0, + "content": "In this paper, we define two types of data agents: the Metric Agent and the Log Agent. Each adopts a distinct approach to extract key information from the raw metrics data and log data, respectively." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.855, + 0.482, + 0.897 + ], + "angle": 0, + "content": "Metric Agent: The overall workflow of the Metric Agent begins with simple data preprocessing, which includes noise removal and imputation of missing values. After preprocessing, the multivariate" + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.445, + 0.916, + 0.665 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.557, + 0.675, + 0.871, + 0.69 + ], + "angle": 0, + "content": "Figure 2: System Agents Adaptation Workflow" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.712, + 0.914, + 0.768 + ], + "angle": 0, + "content": "time-series data is converted into natural language descriptions using a large model. 
The generated natural language descriptions capture key aspects of the data, including time intervals, fluctuation trends, and anomaly points." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.768, + 0.915, + 0.836 + ], + "angle": 0, + "content": "Formally, let \\(\\mathbf{M} = \\{m_1, m_2, \\dots, m_n\\}\\) represent the raw multidimensional metrics data, where each \\(m_i\\) corresponds to a time-series for a specific metric. The preprocessing step can be represented as Equation 1, where \\(\\mathbf{M}_{\\mathrm{p}}\\) is the denoised and imputed version of \\(M\\)." + }, + { + "type": "equation", + "bbox": [ + 0.592, + 0.851, + 0.913, + 0.867 + ], + "angle": 0, + "content": "\\[\n\\mathbf {M} _ {\\mathrm {p}} = \\text {P r e p r o c e s s} (\\mathbf {M}) = \\left\\{m _ {1} ^ {\\prime}, m _ {2} ^ {\\prime}, \\dots , m _ {n} ^ {\\prime} \\right\\} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.868, + 0.915, + 0.897 + ], + "angle": 0, + "content": "Next, the processed data is converted into natural language descriptions using a large language model \\(\\mathcal{L}\\), as shown in Equation 2," + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.308, + 0.088 + ], + "angle": 0, + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + }, + { + "type": "header", + "bbox": [ + 0.813, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "Lingzhe Zhang et al." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.107, + 0.482, + 0.149 + ], + "angle": 0, + "content": "where \\(\\mathbf{D}_{\\mathrm{nl}}\\) represents the resulting natural language description, which includes information on time intervals, trends, and identified anomalies in the data." 
+ }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.163, + 0.482, + 0.178 + ], + "angle": 0, + "content": "\\[\n\\mathbf {D} _ {\\mathrm {n l}} = \\mathcal {L} \\left(\\mathbf {M} _ {\\mathrm {p}}\\right) \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.18, + 0.482, + 0.248 + ], + "angle": 0, + "content": "Log Agent: Unlike metrics data, log data is written by developers to monitor the internal state of the system, inherently containing rich semantic information. However, log data often includes substantial redundant information, such as repetitive log entries and messages unrelated to system anomalies." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.248, + 0.482, + 0.317 + ], + "angle": 0, + "content": "To address this, the Log Agent comprises two main components: sequence compression and semantic compression. Sequence compression focuses on reducing the raw log sequence by consolidating repetitive log patterns, while semantic compression extracts key operational information from the logs." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.318, + 0.482, + 0.456 + ], + "angle": 0, + "content": "Sequence Compression: This component utilizes a log-parsing algorithm to transform each log entry into a distinct event template consisting of a static constant part and variable parameters. It then merges consecutive occurrences of identical event templates. Formally, for a given raw log sequence \\( L = \\{l_1, l_2, \\dots, l_N\\} \\), where \\( l_n \\) represents an individual log entry, the parsing process transforms \\( L \\) into \\( L_e = \\{e_1, e_2, \\dots, e_N\\} \\), where \\( e_i \\) represents an event template. Consecutive identical events are merged, yielding \\( G' = \\{e_1', e_2', \\dots, e_C'\\} \\), where \\( C \\ll N \\) and \\( e_i' = e_i \\times c \\) represents the event \\( e_i \\) with a count \\( c \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.082, + 0.456, + 0.485, + 0.54 + ], + "angle": 0, + "content": "Semantic Compression: This component employs a prompt-based method to convert lengthy log sequences into descriptions of the database's ongoing operations. Formally, given a sequence of raw log entries \\( L = \\{l_1, l_2, \\dots, l_N\\} \\), the LLM-based summarization transforms and compresses \\( L \\) into a smaller set of key operational elements \\( O = \\{o_1, o_2, \\dots, o_M\\} \\), where \\( M \\ll N \\)." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.551, + 0.229, + 0.566 + ], + "angle": 0, + "content": "3.3 Task Agents" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.569, + 0.482, + 0.666 + ], + "angle": 0, + "content": "After system agents collect necessary data via data agents, the meta agent coordinates task agents to carry out failure management using trace data. This process involves three agent types—detection, diagnosis, and mitigation—which operate sequentially. Detection agents identify anomalies in each time window; upon detection, diagnosis agents locate and classify the issue; finally, mitigation agents propose solutions based on the diagnosis." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.667, + 0.485, + 0.723 + ], + "angle": 0, + "content": "Though prompts vary by task, all agents follow a common RAG+CoT approach, using historical data as labeled examples—normal/abnormal for detection, failure types for diagnosis—to guide reasoning during querying." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.733, + 0.358, + 0.748 + ], + "angle": 0, + "content": "4 PRELIMINARY EVALUATION" + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.753, + 0.185, + 0.768 + ], + "angle": 0, + "content": "4.1 Design" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.771, + 0.482, + 0.867 + ], + "angle": 0, + "content": "To evaluate AgentFM, we assess its feasibility and effectiveness in Apache IoTDB. 
We manually injected 10 types of anomalies, including CPU saturation, IO saturation, memory saturation, network delay increase, network bandwidth limitation, network partition occurrence, workload spikes, accompanying slow queries, excessive data export, and excessive data import. Each anomaly type is injected 20 times." + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.868, + 0.482, + 0.897 + ], + "angle": 0, + "content": "The evaluation is conducted based on Qwen2.5-72b to assess the results of anomaly detection and diagnosis. The performance is" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.107, + 0.916, + 0.149 + ], + "angle": 0, + "content": "measured using precision, recall, and F1-score metrics. Additionally, the mitigation solutions generated are manually analyzed to determine their usefulness." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.162, + 0.622, + 0.176 + ], + "angle": 0, + "content": "4.2 Results" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.18, + 0.916, + 0.25 + ], + "angle": 0, + "content": "We utilize the function call feature to guide the model in generating \"yes\" or \"no\" responses to evaluate anomaly detection performance. For failure diagnosis, we manually review the LLM-generated results to identify the classification most closely matching the pre-injected anomalies and assessed the outcomes accordingly." + }, + { + "type": "table_caption", + "bbox": [ + 0.551, + 0.266, + 0.877, + 0.28 + ], + "angle": 0, + "content": "Table 3: Anomaly Detection & Diagnosis Results" + }, + { + "type": "table", + "bbox": [ + 0.548, + 0.293, + 0.882, + 0.351 + ], + "angle": 0, + "content": "
TaskPrecisionRecallF1-Score
Anomaly Detection95.14%97.03%95.76%
Failure Diagnosis89.61%87.04%87.62%
" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.371, + 0.916, + 0.454 + ], + "angle": 0, + "content": "As shown in Table 3, AgentFM demonstrates promising performance in our preliminary experiments. However, based on our observations, the model struggles to identify correct results when dealing with complex internal anomalies (e.g., excessive data import). We plan to focus on optimizing performance for such scenarios in future work." + }, + { + "type": "title", + "bbox": [ + 0.523, + 0.48, + 0.694, + 0.492 + ], + "angle": 0, + "content": "Increase CPU Resources:" + }, + { + "type": "text", + "bbox": [ + 0.523, + 0.494, + 0.906, + 0.534 + ], + "angle": 0, + "content": "Iotdb0 frequently experiences high CPU usage, consider increasing the number of CPU cores allocated to the container or raising the CPU limit." + }, + { + "type": "title", + "bbox": [ + 0.524, + 0.536, + 0.636, + 0.549 + ], + "angle": 0, + "content": "Load Balancing:" + }, + { + "type": "text", + "bbox": [ + 0.524, + 0.55, + 0.905, + 0.577 + ], + "angle": 0, + "content": "If iotdb0 is part of a cluster, consider distributing some of the load to other nodes to achieve load balancing." + }, + { + "type": "image_caption", + "bbox": [ + 0.536, + 0.604, + 0.892, + 0.619 + ], + "angle": 0, + "content": "Figure 3: Sample Mitigation Solutions from AgentFM" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.64, + 0.916, + 0.71 + ], + "angle": 0, + "content": "We also conduct a manual analysis of the mitigation solutions recommended by AgentFM. One example is illustrated in Figure 3. We extract the key information from the solution and find that these recommendations provide targeted and effective assistance in system remediation." 
+ }, + { + "type": "title", + "bbox": [ + 0.515, + 0.722, + 0.664, + 0.736 + ], + "angle": 0, + "content": "5 CONCLUSION" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.74, + 0.916, + 0.837 + ], + "angle": 0, + "content": "This paper investigates the varying significance of different roles in failure management. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Our preliminary experiments demonstrate the feasibility of AgentFM. In the future, we plan to further enhance AgentFM's capabilities in automating remediation execution." + }, + { + "type": "title", + "bbox": [ + 0.515, + 0.85, + 0.715, + 0.863 + ], + "angle": 0, + "content": "ACKNOWLEDGEMENT" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.868, + 0.915, + 0.897 + ], + "angle": 0, + "content": "This work is supported by Key-Area Research and Development Program of Guangdong Province, China (NO.2020B010164003)." + } + ], + [ + { + "type": "header", + "bbox": [ + 0.084, + 0.076, + 0.565, + 0.088 + ], + "angle": 0, + "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents" + }, + { + "type": "header", + "bbox": [ + 0.693, + 0.076, + 0.913, + 0.088 + ], + "angle": 0, + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.106, + 0.203, + 0.12 + ], + "angle": 0, + "content": "REFERENCES" + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.123, + 0.482, + 0.164 + ], + "angle": 0, + "content": "[1] Toufique Ahmed, Supriyo Ghosh, Chetan Bansal, Thomas Zimmermann, Xuchao Zhang, and Saravan Rajmohan. 2023. Recommending root-cause and mitigation steps for cloud incidents using large language models. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1737-1749." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.164, + 0.482, + 0.204 + ], + "angle": 0, + "content": "[2] James C Corbett, Jeffrey Dean, Michael Epstein, Andrew Fikes, Christopher Frost, Jeffrey John Furman, Sanjay Ghemawat, Andrey Gubarev, Christopher Heiser, Peter Hochschild, et al. 2013. Spanner: Google's globally distributed database. ACM Transactions on Computer Systems (TOCS) 31, 3 (2013), 1-22." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.205, + 0.482, + 0.254 + ], + "angle": 0, + "content": "[3] Drishti Goel, Fiza Husain, Aditya Singh, Supriyo Ghosh, Anjaly Parayil, Chetan Bansal, Xuchao Zhang, and Saravan Rajmohan. 2024. X-lifecycle learning for cloud incident management using llms. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 417-428." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.255, + 0.482, + 0.295 + ], + "angle": 0, + "content": "[4] Adha Hrusto, Per Runeson, and Magnus C Ohlsson. 2024. Autonomous monitors for detecting failures early and reporting interpretable alerts in cloud operations. In Proceedings of the 46th International Conference on Software Engineering: Software Engineering in Practice. 47-57." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.295, + 0.482, + 0.326 + ], + "angle": 0, + "content": "[5] Dongxu Huang, Qi Liu, Qiu Cui, Zhuhe Fang, Xiaoyu Ma, Fei Xu, Li Shen, Liu Tang, Yuxing Zhou, Menglong Huang, et al. 2020. TiDB: a Raft-based HTAP database. Proceedings of the VLDB Endowment 13, 12 (2020), 3072-3084." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.326, + 0.482, + 0.365 + ], + "angle": 0, + "content": "[6] Jun Huang, Yang Yang, Hang Yu, Jianguo Li, and Xiao Zheng. 2023. Twin graph-based anomaly detection via attentive multi-modal learning for microservice system. In 2023 38th IEEE/ACM International Conference on Automated Software Engineering (ASE). IEEE, 66-78." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.365, + 0.482, + 0.406 + ], + "angle": 0, + "content": "[7] Yuyuan Kang, Xiangdong Huang, Shaoxu Song, Lingzhe Zhang, Jialin Qiao, Chen Wang, Jianmin Wang, and Julian Feinauer. 2022. Separation or not: On handing out-of-order time-series data in leveled LSM-tree. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 3340-3352." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.406, + 0.482, + 0.446 + ], + "angle": 0, + "content": "[8] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, and Michael R Lyu. 2023. Eadro: An end-to-end troubleshooting framework for microservices on multi-source data. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1750-1762." + }, + { + "type": "ref_text", + "bbox": [ + 0.092, + 0.446, + 0.482, + 0.487 + ], + "angle": 0, + "content": "[9] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, Yongqiang Yang, and Michael R Lyu. 2023. Heterogeneous anomaly detection for software systems via semi-supervised cross-modal attention. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1724-1736." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.487, + 0.482, + 0.525 + ], + "angle": 0, + "content": "[10] Cheng-Ming Lin, Ching Chang, Wei-Yao Wang, Kuang-Da Wang, and Wen-Chih Peng. 2024. Root Cause Analysis in Microservice Using Neural Granger Causal Discovery. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 206-213." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.526, + 0.482, + 0.567 + ], + "angle": 0, + "content": "[11] Minghua Ma, Zheng Yin, Shenglin Zhang, Sheng Wang, Christopher Zheng, Xinhao Jiang, Hanwen Hu, Cheng Luo, Yilin Li, Nengjun Qiu, et al. 2020. Diagnosing root causes of intermittent slow queries in cloud databases. Proceedings of the VLDB Endowment 13, 8 (2020), 1176-1189." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.567, + 0.482, + 0.607 + ], + "angle": 0, + "content": "[12] Devjeet Roy, Xuchao Zhang, Rashi Bhave, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. Exploring IIm-based agents for root cause analysis. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering.* 208-219." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.607, + 0.482, + 0.657 + ], + "angle": 0, + "content": "[13] Manish Shetty, Yinfang Chen, Gagan Somashekar, Minghua Ma, Yogesh Simmhan, Xuchao Zhang, Jonathan Mace, Dax Vandevoorde, Pedro Las-Casas, Shachee Mishra Gupta, et al. 2024. Building AI Agents for Autonomous Clouds: Challenges and Design Principles. In Proceedings of the 2024 ACM Symposium on Cloud Computing, 99-110." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.657, + 0.482, + 0.697 + ], + "angle": 0, + "content": "[14] Chen Wang, Xiangdong Huang, Jialin Qiao, Tian Jiang, Lei Rui, Jinrui Zhang, Rong Kang, Julian Feinauer, Kevin A McGrail, Peng Wang, et al. 2020. Apache iotdb: time-series database for internet of things. Proceedings of the VLDB Endowment 13, 12 (2020), 2901-2904." + }, + { + "type": "ref_text", + "bbox": [ + 0.087, + 0.697, + 0.482, + 0.728 + ], + "angle": 0, + "content": "[15] Lin Yang, Junjie Chen, Zan Wang, Weijing Wang, Jiajun Jiang, Xuyuan Dong, and Wenbin Zhang. 2021. Semi-supervised log-based anomaly detection via probabilistic label estimation. In 2021 IEEE/ACM 43rd International Conference on" + }, + { + "type": "list", + "bbox": [ + 0.087, + 0.123, + 0.482, + 0.728 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.541, + 0.11, + 0.758, + 0.12 + ], + "angle": 0, + "content": "Software Engineering (ICSE). IEEE, 1448-1460." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.12, + 0.914, + 0.16 + ], + "angle": 0, + "content": "[16] Zhenkun Yang, Chuanhui Yang, Fusheng Han, Mingqiang Zhuang, Bing Yang, Zhifeng Yang, Xiaojun Cheng, Yuzhong Zhao, Wenhui Shi, Huafeng Xi, et al. 2022. OceanBase: a 707 million tpmC distributed relational database system. Proceedings of the VLDB Endowment 15, 12 (2022), 3385-3397." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.161, + 0.914, + 0.201 + ], + "angle": 0, + "content": "[17] Chenxi Zhang, Xin Peng, Chaofeng Sha, Ke Zhang, Zhenqing Fu, Xiya Wu, Qingwei Lin, and Dongmei Zhang. 2022. Deepralog: Trace-log combined microservice anomaly detection through graph-based deep learning. In Proceedings of the 44th international conference on software engineering. 623-634." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.201, + 0.914, + 0.25 + ], + "angle": 0, + "content": "[18] Dylan Zhang, Xuchao Zhang, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. LM-PACE: Confidence estimation by large language models for effective root causing of cloud incidents. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering*, 388-398." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.251, + 0.914, + 0.289 + ], + "angle": 0, + "content": "[19] Lingzhe Zhang, Tong Jia, Mengxi Jia, Ying Li, Yong Yang, and Zhonghai Wu. 2024. Multivariate Log-based Anomaly Detection for Distributed Database. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4256-4267." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.289, + 0.914, + 0.329 + ], + "angle": 0, + "content": "[20] Lingzhe Zhang, Tong Jia, Mengxi Jia, Hongyi Liu, Yong Yang, Zhonghai Wu, and Ying Li. 2024. Towards Close-To-Zero Runtime Collection Overhead: Raft-Based Anomaly Diagnosis on System Faults for Distributed Storage System. IEEE Transactions on Services Computing (2024)." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.33, + 0.914, + 0.369 + ], + "angle": 0, + "content": "[21] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Aiwei Liu, Yong Yang, Zhonghai Wu, Xuming Hu, Philip S Yu, and Ying Li. 2024. A Survey of AIOps for Failure Management in the Era of Large Language Models. arXiv preprint arXiv:2406.11213 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.37, + 0.914, + 0.41 + ], + "angle": 0, + "content": "[22] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. ScalaLog: Scalable Log-Based Failure Diagnosis Using LLM. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.41, + 0.914, + 0.45 + ], + "angle": 0, + "content": "[23] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. XRAGLog: A Resource-Efficient and Context-Aware Log-Based Anomaly Detection Method Using Retrieval-Augmented Generation. In AAAI 2025 Workshop on Preventing and Detecting LLM Misinformation (PDLM)." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.45, + 0.914, + 0.491 + ], + "angle": 0, + "content": "[24] Lingzhe Zhang, Tong Jia, Kangjin Wang, Mengxi Jia, Yong Yang, and Ying Li. 2024. Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study. In Proceedings of the 18th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement. 538-548." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.491, + 0.914, + 0.531 + ], + "angle": 0, + "content": "[25] Ling-Zhe Zhang, Xiang-Dong Huang, Yan-Kai Wang, Jia-Lin Qiao, Shao-Xu Song, and Jian-Min Wang. 2024. Time-tired compaction: An elastic compaction scheme for LSM-tree based time-series database. Advanced Engineering Informatics 59 (2024), 102224." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.531, + 0.914, + 0.571 + ], + "angle": 0, + "content": "[26] Shenglin Zhang, Pengxiang Jin, Zihan Lin, Yongqian Sun, Bicheng Zhang, Sibo Xia, Zhengdan Li, Zhenyu Zhong, Minghua Ma, Wa Jin, et al. 2023. Robust failure diagnosis of microservice system through multimodal data. IEEE Transactions on Services Computing 16, 6 (2023), 3851-3864." + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.571, + 0.914, + 0.611 + ], + "angle": 0, + "content": "[27] Wei Zhang, Hongcheng Guo, Jian Yang, Yi Zhang, Chaoran Yan, Zhoujin Tian, Hangyuan Ji, Zhoujun Li, Tongliang Li, Tieqiao Zheng, et al. 2024. mABC: multi-Agent Blockchain-Inspired Collaboration for root cause analysis in micro-services architecture. arXiv preprint arXiv:2404.12135 (2024)." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.611, + 0.914, + 0.652 + ], + "angle": 0, + "content": "[28] Xuchao Zhang, Supriyo Ghosh, Chetan Bansal, Rujia Wang, Minghua Ma, Yu Kang, and Saravan Rajmohan. 2024. Automated root causing of cloud incidents using in-context learning with gpt-4. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 266-277." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.652, + 0.914, + 0.702 + ], + "angle": 0, + "content": "[29] Nengwen Zhao, Junjie Chen, Zhaoyang Yu, Honglin Wang, Jiesong Li, Bin Qiu, Hongyu Xu, Wenchi Zhang, Kaixin Sui, and Dan Pei. 2021. Identifying bad software changes via multimodal anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 527-539." + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.702, + 0.914, + 0.732 + ], + "angle": 0, + "content": "[30] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. Multimodal Causal Structure Learning and Root Cause Analysis. 
arXiv preprint arXiv:2402.02357 (2024)." + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.11, + 0.914, + 0.732 + ], + "angle": 0, + "content": null + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_origin.pdf b/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1ea745a4d6da57fd0c8d3648c0963e785aa7e1fc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/e7bb7c32-5113-4b8e-ba10-d1799d972e03_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1a29ef41117fdd55543e3b209d72b5620a51245217616f3edf4689e1480b6b7c +size 1710651 diff --git a/data/2025/2504_06xxx/2504.06614/full.md b/data/2025/2504_06xxx/2504.06614/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2f6085ffe008ebc6217ae21b2d927de4b7a522f5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/full.md @@ -0,0 +1,240 @@ +# AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents + +Lingzhe Zhang† +Peking University +Beijing, China +zhang.lingzhe@stu.pku.edu.cn + +Yunpeng Zhai† +Alibaba Group +Beijing, China +zhaiyunpeng.zyp@alibaba-inc.com + +Tong Jia* +Peking University +Beijing, China +jia.tong@pku.edu.cn + +Xiaosong Huang +Peking University +Beijing, China +hxs@stu.pku.edu.cn + +Chiming Duan +Peking University +Beijing, China +duanchiming@stu.pku.edu.cn + +Ying Li* +Peking University +Beijing, China +li.ying@pku.edu.cn + +# ABSTRACT + +Distributed databases are critical infrastructures for today's large-scale software systems, making effective failure management essential to ensure software availability. However, existing approaches often overlook the role distinctions within distributed databases and rely on small-scale models with limited generalization capabilities.
In this paper, we conduct a preliminary empirical study to emphasize the unique significance of different roles. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. AgentFM addresses failure management by considering system roles, data roles, and task roles, with a meta-agent orchestrating these components. Preliminary evaluations using Apache IoTDB demonstrate the effectiveness of AgentFM and open new directions for further research. + +# CCS CONCEPTS + +- Software and its engineering $\rightarrow$ Maintaining software. + +# KEYWORDS + +Failure Management, Distributed Databases, Multi Agents + +# ACM Reference Format: + +Lingzhe Zhang†, Yunpeng Zhai†, Tong Jia*, Xiaosong Huang, Chiming Duan, and Ying Li*. 2025. AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents. In 33rd ACM International Conference on the Foundations of Software Engineering (FSE '25), June 23–28, 2025, Trondheim, Norway. ACM, New York, NY, USA, 5 pages. https://doi.org/10.1145/3696630.3728492 + +$\dagger$ Equal contribution. +*Corresponding author. + +Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org. + +FSE '25, June 23-28, 2025, Trondheim, Norway + +© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM. 
+ +ACM ISBN 979-8-4007-1276-0/25/06...$15.00 + +https://doi.org/10.1145/3696630.3728492 + +# 1 INTRODUCTION + +The distributed databases, such as Google Spanner [2], Alibaba OceanBase [16], TiDB [5], and Apache IoTDB [14], have become integral components of cloud infrastructures, handling vast volumes of data [7, 25]. + +However, these systems frequently encounter anomalies such as system failures and performance degradation, leading to significant financial losses. For example, Alibaba Cloud faces Intermittent Slow Queries (iSQs) [11], leading to billions of dollars in annual losses. Amazon reports that even a 0.1-second delay in loading caused by database anomalies can lead to a $1\%$ increase in financial losses [19]. Therefore, it is crucial to detect system failures in real time, analyze the root causes of these failures, and automatically remediate them. + +System traces, metrics, and logs capture the states and critical events of active processes, making them essential for managing software failures. These data sources provide insights into both normal operations and deviations signaling potential failures. Leveraging their multimodal nature, recent research has enhanced anomaly detection and diagnosis in complex systems [6, 8-10, 17, 20-24, 26, 29, 30]. For example, Eadro [8] integrates anomaly detection and root cause localization using multi-source data, while AnoFusion [26] employs unsupervised multimodal failure detection in microservices. MSTGAD [6] combines all three data types with attentive multimodal learning for graph-based anomaly detection. Studies also explore two-source combinations, such as DeepTraLog [17], which pairs traces with logs using a GGNN-based model, and SCWarn [29], which identifies anomalies via heterogeneous metrics and logs. Although the effectiveness of these methods has been demonstrated in specific scenarios, they face several practical challenges when applied to distributed databases: + +- Role Awareness. 
In distributed databases, nodes play different roles, each with varying levels of importance. Effective failure management often requires coordination and collaboration among multiple nodes. However, current methods completely overlook these roles, leading to suboptimal fault detection, inaccurate root cause diagnosis, and an inability to provide critical repair strategies. +- Application Limitations. Existing failure management models lack generalizability. For instance, these models are typically trained on specific systems and encounter concept drift when applied to new systems. Moreover, since these + +methods often frame the task as a classification problem, the resulting outputs lack interpretability, which is crucial for Operations and Control Engineers (OCEs) to effectively resolve issues. + +To address the first challenge, we conduct a detailed analysis of the various roles within a distributed database and the entire failure management process. We ultimately identify three distinct role categories: system roles, data roles, and task roles. System roles represent the various roles inherent in the distributed database itself (e.g., leader nodes, follower nodes). Data roles refer to the different data sources involved in failure management, while task roles represent the various tasks that need to be executed during failure management. + +To tackle the second challenge, we adopt a large language model (LLM)-based approach. While many existing LLM-based failure management solutions have been proposed, a significant number of them do not utilize the three types of data sources mentioned above [1, 3, 12, 18, 28]. Moreover, some approaches that do incorporate these data sources fail to account for the role-based structure within distributed databases [4, 13, 27]. Therefore, we propose a role-aware, LLM-driven multi-agent approach that integrates the characteristics of these roles and the three data sources. 
+ +Building on these insights, we introduce AgentFM, a comprehensive role-aware failure management framework for distributed databases, powered by LLM-driven multi-agent systems. AgentFM integrates the unique characteristics of distributed database roles with the rich multimodal data sources typically encountered in failure management, such as system traces, metrics, and logs. By employing a multi-agent architecture, AgentFM facilitates specialized agents for each role—system roles, data roles, and task roles—ensuring a more nuanced and effective approach to failure detection, diagnosis, and resolution. + +We conduct preliminary experiments on Apache IoTDB [14], a distributed time-series database system, to assess the effectiveness of AgentFM in failure detection and root cause analysis. Furthermore, we manually verified the accuracy and validity of the generated mitigation solution. + +Contributions. The contributions of this paper are threefold. First, we conduct a preliminary empirical study highlighting the varying significance of different roles in failure management. Second, we propose AgentFM, a role-aware failure management framework for distributed databases with LLM-driven multi-agents. Third, we provide a preliminary evaluation of AgentFM on Apache IoTDB, demonstrating its feasibility and effectiveness. + +# 2 PRELIMINARY EMPIRCAL STUDY + +In this section, we conduct a preliminary empirical study using Apache IoTDB [14], focusing on the significance of different roles in the failure management process. + +# 2.1 System Roles + +We manually injected anomalies (excessive data export) into Apache IoTDB during runtime and evaluate the anomaly detection performance (precision, recall and f1-score) for each node using PLELog [15], which is a state-of-the-art log-based anomaly detection method. + +Table 1: Anomaly Detection Results for Each Node + +
Node1Node2Node3Node4Node5Node6
p39.68%34.35%59.64%31.17%77.60%89.42%
r99.01%100.00%98.02%100.00%96.04%92.08%
f156.66%51.14%74.16%47.53%85.84%90.73%
+ +As shown in Table 1, $\text{Node}_6$ achieves the best anomaly detection performance due to hosting the largest number of leader partitions. In contrast, the detection performance on other nodes is relatively suboptimal. This observation underscores the differing significance of various system roles. + +# 2.2 Data Roles + +We further conduct anomaly diagnosis classification experiments on Apache IoTDB using both metrics data and log data. To ensure fairness, we do not adopt state-of-the-art methods; instead, we implement a simple classification algorithm based on the Dynamic Time Warping (DTW) algorithm. + +Table 2: Anomalies that can be Classified by Metrics and Logs + +
SourceCPUMemoryExportImportConfiguration
Metrics
Logs
+ +As shown in Table 2, anomalies with an F1-score above $50\%$ are considered identifiable by the corresponding data type. The results reveal that metrics are particularly effective at detecting resource anomalies, such as CPU and memory saturation, while logs excel at identifying internal database issues, including excessive data export/import and configuration errors. This observation highlights the distinct significance of different data roles. + +# 3 METHODOLOGY + +Our preliminary empirical study highlights that the effectiveness of failure management varies significantly across different roles in distributed databases. Consequently, it becomes crucial to assign varying levels of importance to different roles at different times and adopt role-specific operations to optimize outcomes. + +In this section, we introduce AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Figure 1 illustrates the architecture of AgentFM, which comprises three types of agents corresponding to distinct roles: system agents, data agents, and task agents. System agents represent the various roles of nodes within the distributed database, data agents handle information from diverse data sources, and task agents focus on executing specific failure management tasks. Notably, since system agents and data agents operate independently on each node, they are collectively referred to as standalone agents. Additionally, the framework incorporates a meta-agent, which is responsible for orchestrating, adapting, aggregating, and ultimately producing the final results of the agents' operations, ensuring cohesive and efficient failure management. + +![](images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg) +Figure 1: AgentFM Architecture + +The framework operates by first extracting critical information from different system agents using the corresponding data agents. 
This information is then transmitted to a centralized meta-agent for unified decision-making. The decision-making process employs trace data as a cohesive framework to integrate key insights across nodes. Finally, the appropriate task agents execute the required failure management operations, completing the process efficiently and effectively. + +# 3.1 System Agents + +Different distributed databases assign dynamic system roles that may change during runtime (e.g., leader re-election after node failure). To handle this, we designed an adaptive mechanism for system agents within the Meta Agent. + +As illustrated in Figure 2, the core component is the System Role Manager, which initializes by analyzing configuration files and system docs to identify each node's role and importance. At runtime, it periodically queries the system state and updates roles accordingly. + +Based on this process, each node instantiates a corresponding system agent—such as a Config Agent, Coordinator Agent, or Storage Agent—each embedding multiple data agents to handle specialized tasks. + +# 3.2 Data Agents + +In this paper, we define two types of data agents: the Metric Agent and the Log Agent. Each adopts a distinct approach to extract key information from the raw metrics data and log data, respectively. + +Metric Agent: The overall workflow of the Metric Agent begins with simple data preprocessing, which includes noise removal and imputation of missing values. After preprocessing, the multivariate + +![](images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg) +Figure 2: System Agents Adaptation Workflow + +time-series data is converted into natural language descriptions using a large model. The generated natural language descriptions capture key aspects of the data, including time intervals, fluctuation trends, and anomaly points. 
+ +Formally, let $\mathbf{M} = \{m_1, m_2, \dots, m_n\}$ represent the raw multidimensional metrics data, where each $m_i$ corresponds to a time-series for a specific metric. The preprocessing step can be represented as Equation 1, where $\mathbf{M}_{\mathrm{p}}$ is the denoised and imputed version of $M$ . + +$$ +\mathbf {M} _ {\mathrm {p}} = \text {P r e p r o c e s s} (\mathbf {M}) = \left\{m _ {1} ^ {\prime}, m _ {2} ^ {\prime}, \dots , m _ {n} ^ {\prime} \right\} \tag {1} +$$ + +Next, the processed data is converted into natural language descriptions using a large language model $\mathcal{L}$ , as shown in Equation 2, + +where $\mathbf{D}_{\mathrm{nl}}$ represents the resulting natural language description, which includes information on time intervals, trends, and identified anomalies in the data. + +$$ +\mathbf {D} _ {\mathrm {n l}} = \mathcal {L} \left(\mathbf {M} _ {\mathrm {p}}\right) \tag {2} +$$ + +Log Agent: Unlike metrics data, log data is written by developers to monitor the internal state of the system, inherently containing rich semantic information. However, log data often includes substantial redundant information, such as repetitive log entries and messages unrelated to system anomalies. + +To address this, the Log Agent comprises two main components: sequence compression and semantic compression. Sequence compression focuses on reducing the raw log sequence by consolidating repetitive log patterns, while semantic compression extracts key operational information from the logs. + +Sequence Compression: This component utilizes a log-parsing algorithm to transform each log entry into a distinct event template consisting of a static constant part and variable parameters. It then merges consecutive occurrences of identical event templates. 
Formally, for a given raw log sequence $L = \{l_1, l_2, \dots, l_N\}$ , where $l_n$ represents an individual log entry, the parsing process transforms $L$ into $L_e = \{e_1, e_2, \dots, e_N\}$ , where $e_i$ represents an event template. Consecutive identical events are merged, yielding $G' = \{e_1', e_2', \dots, e_C'\}$ , where $C \ll N$ and $e_i' = e_i \times c$ represents the event $e_i$ with a count $c$ . + +Semantic Compression: This component employs a prompt-based method to convert lengthy log sequences into descriptions of the database's ongoing operations. Formally, given a sequence of raw log entries $L = \{l_1, l_2, \dots, l_N\}$ , the LLM-based summarization transforms and compresses $L$ into a smaller set of key operational elements $O = \{o_1, o_2, \dots, o_M\}$ , where $M \ll N$ . + +# 3.3 Task Agents + +After system agents collect necessary data via data agents, the meta agent coordinates task agents to carry out failure management using trace data. This process involves three agent types—detection, diagnosis, and mitigation—which operate sequentially. Detection agents identify anomalies in each time window; upon detection, diagnosis agents locate and classify the issue; finally, mitigation agents propose solutions based on the diagnosis. + +Though prompts vary by task, all agents follow a common RAG+CoT approach, using historical data as labeled examples—normal/abnormal for detection, failure types for diagnosis—to guide reasoning during querying. + +# 4 PRELIMINARY EVALUATION + +# 4.1 Design + +To evaluate AgentFM, we assess its feasibility and effectiveness in Apache IoTDB. We manually injected 10 types of anomalies, including CPU saturation, IO saturation, memory saturation, network delay increase, network bandwidth limitation, network partition occurrence, workload spikes, accompanying slow queries, excessive data export, and excessive data import. Each anomaly type is injected 20 times. 
+ +The evaluation is conducted based on Qwen2.5-72b to assess the results of anomaly detection and diagnosis. The performance is + +measured using precision, recall, and F1-score metrics. Additionally, the mitigation solutions generated are manually analyzed to determine their usefulness. + +# 4.2 Results + +We utilize the function call feature to guide the model in generating "yes" or "no" responses to evaluate anomaly detection performance. For failure diagnosis, we manually review the LLM-generated results to identify the classification most closely matching the pre-injected anomalies and assessed the outcomes accordingly. + +Table 3: Anomaly Detection & Diagnosis Results + +
TaskPrecisionRecallF1-Score
Anomaly Detection95.14%97.03%95.76%
Failure Diagnosis89.61%87.04%87.62%
+ +As shown in Table 3, AgentFM demonstrates promising performance in our preliminary experiments. However, based on our observations, the model struggles to identify correct results when dealing with complex internal anomalies (e.g., excessive data import). We plan to focus on optimizing performance for such scenarios in future work. + +# Increase CPU Resources: + +Iotdb0 frequently experiences high CPU usage, consider increasing the number of CPU cores allocated to the container or raising the CPU limit. + +# Load Balancing: + +If iotdb0 is part of a cluster, consider distributing some of the load to other nodes to achieve load balancing. + +Figure 3: Sample Mitigation Solutions from AgentFM + +We also conduct a manual analysis of the mitigation solutions recommended by AgentFM. One example is illustrated in Figure 3. We extract the key information from the solution and find that these recommendations provide targeted and effective assistance in system remediation. + +# 5 CONCLUSION + +This paper investigates the varying significance of different roles in failure management. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Our preliminary experiments demonstrate the feasibility of AgentFM. In the future, we plan to further enhance AgentFM's capabilities in automating remediation execution. + +# ACKNOWLEDGEMENT + +This work is supported by Key-Area Research and Development Program of Guangdong Province, China (NO.2020B010164003). + +# REFERENCES + +[1] Toufique Ahmed, Supriyo Ghosh, Chetan Bansal, Thomas Zimmermann, Xuchao Zhang, and Saravan Rajmohan. 2023. Recommending root-cause and mitigation steps for cloud incidents using large language models. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1737-1749. 
+[2] James C Corbett, Jeffrey Dean, Michael Epstein, Andrew Fikes, Christopher Frost, Jeffrey John Furman, Sanjay Ghemawat, Andrey Gubarev, Christopher Heiser, Peter Hochschild, et al. 2013. Spanner: Google's globally distributed database. ACM Transactions on Computer Systems (TOCS) 31, 3 (2013), 1-22. +[3] Drishti Goel, Fiza Husain, Aditya Singh, Supriyo Ghosh, Anjaly Parayil, Chetan Bansal, Xuchao Zhang, and Saravan Rajmohan. 2024. X-lifecycle learning for cloud incident management using llms. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 417-428. +[4] Adha Hrusto, Per Runeson, and Magnus C Ohlsson. 2024. Autonomous monitors for detecting failures early and reporting interpretable alerts in cloud operations. In Proceedings of the 46th International Conference on Software Engineering: Software Engineering in Practice. 47-57. +[5] Dongxu Huang, Qi Liu, Qiu Cui, Zhuhe Fang, Xiaoyu Ma, Fei Xu, Li Shen, Liu Tang, Yuxing Zhou, Menglong Huang, et al. 2020. TiDB: a Raft-based HTAP database. Proceedings of the VLDB Endowment 13, 12 (2020), 3072-3084. +[6] Jun Huang, Yang Yang, Hang Yu, Jianguo Li, and Xiao Zheng. 2023. Twin graph-based anomaly detection via attentive multi-modal learning for microservice system. In 2023 38th IEEE/ACM International Conference on Automated Software Engineering (ASE). IEEE, 66-78. +[7] Yuyuan Kang, Xiangdong Huang, Shaoxu Song, Lingzhe Zhang, Jialin Qiao, Chen Wang, Jianmin Wang, and Julian Feinauer. 2022. Separation or not: On handing out-of-order time-series data in leveled LSM-tree. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 3340-3352. +[8] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, and Michael R Lyu. 2023. Eadro: An end-to-end troubleshooting framework for microservices on multi-source data. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1750-1762. 
+[9] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, Yongqiang Yang, and Michael R Lyu. 2023. Heterogeneous anomaly detection for software systems via semi-supervised cross-modal attention. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1724-1736. +[10] Cheng-Ming Lin, Ching Chang, Wei-Yao Wang, Kuang-Da Wang, and Wen-Chih Peng. 2024. Root Cause Analysis in Microservice Using Neural Granger Causal Discovery. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 206-213. +[11] Minghua Ma, Zheng Yin, Shenglin Zhang, Sheng Wang, Christopher Zheng, Xinhao Jiang, Hanwen Hu, Cheng Luo, Yilin Li, Nengjun Qiu, et al. 2020. Diagnosing root causes of intermittent slow queries in cloud databases. Proceedings of the VLDB Endowment 13, 8 (2020), 1176-1189. +[12] Devjeet Roy, Xuchao Zhang, Rashi Bhave, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. Exploring LLM-based agents for root cause analysis. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering.* 208-219. +[13] Manish Shetty, Yinfang Chen, Gagan Somashekar, Minghua Ma, Yogesh Simmhan, Xuchao Zhang, Jonathan Mace, Dax Vandevoorde, Pedro Las-Casas, Shachee Mishra Gupta, et al. 2024. Building AI Agents for Autonomous Clouds: Challenges and Design Principles. In Proceedings of the 2024 ACM Symposium on Cloud Computing, 99-110. +[14] Chen Wang, Xiangdong Huang, Jialin Qiao, Tian Jiang, Lei Rui, Jinrui Zhang, Rong Kang, Julian Feinauer, Kevin A McGrail, Peng Wang, et al. 2020. Apache iotdb: time-series database for internet of things. Proceedings of the VLDB Endowment 13, 12 (2020), 2901-2904. +[15] Lin Yang, Junjie Chen, Zan Wang, Weijing Wang, Jiajun Jiang, Xuyuan Dong, and Wenbin Zhang. 2021. Semi-supervised log-based anomaly detection via probabilistic label estimation. In 2021 IEEE/ACM 43rd International Conference on + +Software Engineering (ICSE). IEEE, 1448-1460. 
+[16] Zhenkun Yang, Chuanhui Yang, Fusheng Han, Mingqiang Zhuang, Bing Yang, Zhifeng Yang, Xiaojun Cheng, Yuzhong Zhao, Wenhui Shi, Huafeng Xi, et al. 2022. OceanBase: a 707 million tpmC distributed relational database system. Proceedings of the VLDB Endowment 15, 12 (2022), 3385-3397. +[17] Chenxi Zhang, Xin Peng, Chaofeng Sha, Ke Zhang, Zhenqing Fu, Xiya Wu, Qingwei Lin, and Dongmei Zhang. 2022. DeepTraLog: Trace-log combined microservice anomaly detection through graph-based deep learning. In Proceedings of the 44th international conference on software engineering. 623-634. +[18] Dylan Zhang, Xuchao Zhang, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. LM-PACE: Confidence estimation by large language models for effective root causing of cloud incidents. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering*, 388-398. +[19] Lingzhe Zhang, Tong Jia, Mengxi Jia, Ying Li, Yong Yang, and Zhonghai Wu. 2024. Multivariate Log-based Anomaly Detection for Distributed Database. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4256-4267. +[20] Lingzhe Zhang, Tong Jia, Mengxi Jia, Hongyi Liu, Yong Yang, Zhonghai Wu, and Ying Li. 2024. Towards Close-To-Zero Runtime Collection Overhead: Raft-Based Anomaly Diagnosis on System Faults for Distributed Storage System. IEEE Transactions on Services Computing (2024). +[21] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Aiwei Liu, Yong Yang, Zhonghai Wu, Xuming Hu, Philip S Yu, and Ying Li. 2024. A Survey of AIOps for Failure Management in the Era of Large Language Models. arXiv preprint arXiv:2406.11213 (2024). +[22] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. ScalaLog: Scalable Log-Based Failure Diagnosis Using LLM. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5. 
+[23] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. XRAGLog: A Resource-Efficient and Context-Aware Log-Based Anomaly Detection Method Using Retrieval-Augmented Generation. In AAAI 2025 Workshop on Preventing and Detecting LLM Misinformation (PDLM). +[24] Lingzhe Zhang, Tong Jia, Kangjin Wang, Mengxi Jia, Yong Yang, and Ying Li. 2024. Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study. In Proceedings of the 18th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement. 538-548. +[25] Ling-Zhe Zhang, Xiang-Dong Huang, Yan-Kai Wang, Jia-Lin Qiao, Shao-Xu Song, and Jian-Min Wang. 2024. Time-tired compaction: An elastic compaction scheme for LSM-tree based time-series database. Advanced Engineering Informatics 59 (2024), 102224. +[26] Shenglin Zhang, Pengxiang Jin, Zihan Lin, Yongqian Sun, Bicheng Zhang, Sibo Xia, Zhengdan Li, Zhenyu Zhong, Minghua Ma, Wa Jin, et al. 2023. Robust failure diagnosis of microservice system through multimodal data. IEEE Transactions on Services Computing 16, 6 (2023), 3851-3864. +[27] Wei Zhang, Hongcheng Guo, Jian Yang, Yi Zhang, Chaoran Yan, Zhoujin Tian, Hangyuan Ji, Zhoujun Li, Tongliang Li, Tieqiao Zheng, et al. 2024. mABC: multi-Agent Blockchain-Inspired Collaboration for root cause analysis in micro-services architecture. arXiv preprint arXiv:2404.12135 (2024). +[28] Xuchao Zhang, Supriyo Ghosh, Chetan Bansal, Rujia Wang, Minghua Ma, Yu Kang, and Saravan Rajmohan. 2024. Automated root causing of cloud incidents using in-context learning with gpt-4. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 266-277. +[29] Nengwen Zhao, Junjie Chen, Zhaoyang Yu, Honglin Wang, Jiesong Li, Bin Qiu, Hongyu Xu, Wenchi Zhang, Kaixin Sui, and Dan Pei. 2021. Identifying bad software changes via multimodal anomaly detection for online service systems. 
In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 527-539. +[30] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. Multimodal Causal Structure Learning and Root Cause Analysis. arXiv preprint arXiv:2402.02357 (2024). \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06614/images/1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg b/data/2025/2504_06xxx/2504.06614/images/1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg new file mode 100644 index 0000000000000000000000000000000000000000..de0c5c875a68369c98cb0dfd11d1bad8f8e693c3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/images/1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e313747d948d9b0d11c3287e796d2f3eb3a11ee68e6d1e6cf5a796cf40eb980a +size 23223 diff --git a/data/2025/2504_06xxx/2504.06614/images/48afdf90edf72752714f78e53226a211faa8aea67c7d6ed209b1f4cbb1fcf1c6.jpg b/data/2025/2504_06xxx/2504.06614/images/48afdf90edf72752714f78e53226a211faa8aea67c7d6ed209b1f4cbb1fcf1c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a6a1759bd037dcf55550c88fb336277abaed48ed --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/images/48afdf90edf72752714f78e53226a211faa8aea67c7d6ed209b1f4cbb1fcf1c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32c66f9f2cbb82c692f78f8464f5ed39d3ca65f5823014f230f62b7d9d98ae77 +size 2411 diff --git a/data/2025/2504_06xxx/2504.06614/images/4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg b/data/2025/2504_06xxx/2504.06614/images/4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e127159f2c0dabd311dfa48b8515c524b243fbb4 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06614/images/4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4a902ae92d692d5662626a713f891f8c7341dd07e764ddc1fa367931b16b2cc5 +size 15489 diff --git a/data/2025/2504_06xxx/2504.06614/images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg b/data/2025/2504_06xxx/2504.06614/images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f30fd48c651b2909d004d471626276c11c840ff4 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/images/7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ef90d430313f9f3c670f62f5a9fecc30f73ad562f01e170d327c3e71a6e449c +size 123179 diff --git a/data/2025/2504_06xxx/2504.06614/images/82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg b/data/2025/2504_06xxx/2504.06614/images/82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20286f022bedf50bbf838b5be0625c77bfa249db --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/images/82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:eb35c998cce958af012871a8d517661251c8c07b62ecc0eb797422961832c16d +size 16232 diff --git a/data/2025/2504_06xxx/2504.06614/images/c356888d6eb6b4b08030830f723c247e588c021a4bfa2535dc850f753e59f561.jpg b/data/2025/2504_06xxx/2504.06614/images/c356888d6eb6b4b08030830f723c247e588c021a4bfa2535dc850f753e59f561.jpg new file mode 100644 index 0000000000000000000000000000000000000000..270bc692135d7d924b186b59e2fa20255554aba2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/images/c356888d6eb6b4b08030830f723c247e588c021a4bfa2535dc850f753e59f561.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:72fbeb9d66053611d24a34ee60774ec4f27e0146a78a565dfcd23e065ea0bcb7 +size 4824 diff --git a/data/2025/2504_06xxx/2504.06614/images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg b/data/2025/2504_06xxx/2504.06614/images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a1d0c7aa6713e097f46cf7a5185843a69b35f05a --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/images/e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b55cff4865e11857a07ad6cfc91f2f3bd98da5b70c42290bc52d44e832cc9ca +size 48571 diff --git a/data/2025/2504_06xxx/2504.06614/layout.json b/data/2025/2504_06xxx/2504.06614/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1fd3265f3456ef40d5c365e0eb4b0f4a334f2aa5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06614/layout.json @@ -0,0 +1,5127 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 94, + 80, + 516, + 120 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 80, + 516, + 120 + ], + "spans": [ + { + "bbox": [ + 94, + 80, + 516, + 120 + ], + "type": "text", + "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 78, + 129, + 205, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 129, + 205, + 178 + ], + "spans": [ + { + "bbox": [ + 78, + 129, + 205, + 178 + ], + "type": "text", + "content": "Lingzhe Zhang† \nPeking University \nBeijing, China \nzhang.lingzhe@stu.pku.edu.cn" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 233, + 129, + 378, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 129, + 378, + 179 + ], + "spans": [ + { + "bbox": [ + 233, + 129, + 378, + 179 + ], + "type": 
"text", + "content": "Yunpeng Zhai† \nAlibaba Group \nBeijing, China \nzhaiyunpeng.zyp@alibaba-inc.com" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 425, + 130, + 512, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 425, + 130, + 512, + 179 + ], + "spans": [ + { + "bbox": [ + 425, + 130, + 512, + 179 + ], + "type": "text", + "content": "Tong Jia* \nPeking University \nBeijing, China \njia.tong@oku.edu.cn" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 99, + 190, + 183, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 190, + 183, + 238 + ], + "spans": [ + { + "bbox": [ + 99, + 190, + 183, + 238 + ], + "type": "text", + "content": "Xiaosong Huang \nPeking University \nBeijing, China \nhxs@stu.pku.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 243, + 189, + 367, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 243, + 189, + 367, + 239 + ], + "spans": [ + { + "bbox": [ + 243, + 189, + 367, + 239 + ], + "type": "text", + "content": "Chiming Duan \nPeking University \nBeijing, China \nduanchiming@stu.pku.edu.cn" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 429, + 189, + 510, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 429, + 189, + 510, + 239 + ], + "spans": [ + { + "bbox": [ + 429, + 189, + 510, + 239 + ], + "type": "text", + "content": "Ying Li* \nPeking University \nBeijing, China \nli.ying@pku.edu.cn" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 51, + 245, + 112, + 255 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 245, + 112, + 255 + ], + "spans": [ + { + "bbox": [ + 51, + 245, + 112, + 255 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 261, + 296, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 261, + 296, + 413 + ], + "spans": [ + { + "bbox": [ + 50, + 261, + 296, + 413 + ], + "type": "text", + 
"content": "Distributed databases are critical infrastructures for today's large-scale software systems, making effective failure management essential to ensure software availability. However, existing approaches often overlook the role distinctions within distributed databases and rely on small-scale models with limited generalization capabilities. In this paper, we conduct a preliminary empirical study to emphasize the unique significance of different roles. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. AgentFM addresses failure management by considering system roles, data roles, and task roles, with a meta-agent orchestrating these components. Preliminary evaluations using Apache IoTDB demonstrate the effectiveness of AgentFM and open new directions for further research." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 425, + 135, + 436 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 425, + 135, + 436 + ], + "spans": [ + { + "bbox": [ + 51, + 425, + 135, + 436 + ], + "type": "text", + "content": "CCS CONCEPTS" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 440, + 279, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 440, + 279, + 451 + ], + "spans": [ + { + "bbox": [ + 51, + 440, + 279, + 451 + ], + "type": "text", + "content": "- Software and its engineering " + }, + { + "bbox": [ + 51, + 440, + 279, + 451 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 51, + 440, + 279, + 451 + ], + "type": "text", + "content": " Maintaining software." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 462, + 116, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 462, + 116, + 473 + ], + "spans": [ + { + "bbox": [ + 51, + 462, + 116, + 473 + ], + "type": "text", + "content": "KEYWORDS" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 477, + 266, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 477, + 266, + 488 + ], + "spans": [ + { + "bbox": [ + 51, + 477, + 266, + 488 + ], + "type": "text", + "content": "Failure Management, Distributed Databases, Multi Agents" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 51, + 495, + 141, + 505 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 495, + 141, + 505 + ], + "spans": [ + { + "bbox": [ + 51, + 495, + 141, + 505 + ], + "type": "text", + "content": "ACM Reference Format:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 506, + 295, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 506, + 295, + 566 + ], + "spans": [ + { + "bbox": [ + 50, + 506, + 295, + 566 + ], + "type": "text", + "content": "Lingzhe Zhang†, Yunpeng Zhai†, Tong Jia*, Xiaosong Huang, Chiming Duan, and Ying Li*. 2025. AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents. In 33rd ACM International Conference on the Foundations of Software Engineering (FSE '25), June 23–28, 2025, Trondheim, Norway. ACM, New York, NY, USA, 5 pages. 
https://doi.org/10.1145/3696630.3728492" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 51, + 582, + 121, + 601 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 51, + 582, + 112, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 582, + 112, + 591 + ], + "spans": [ + { + "bbox": [ + 51, + 582, + 112, + 591 + ], + "type": "inline_equation", + "content": "\\dagger" + }, + { + "bbox": [ + 51, + 582, + 112, + 591 + ], + "type": "text", + "content": " Equal contribution." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 51, + 591, + 121, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 591, + 121, + 601 + ], + "spans": [ + { + "bbox": [ + 51, + 591, + 121, + 601 + ], + "type": "text", + "content": "*Corresponding author." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 50, + 618, + 295, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 618, + 295, + 675 + ], + "spans": [ + { + "bbox": [ + 50, + 618, + 295, + 675 + ], + "type": "text", + "content": "Permission to make digital or hard copies of all or part of this work for personal or classroom use is granted without fee provided that copies are not made or distributed for profit or commercial advantage and that copies bear this notice and the full citation on the first page. Copyrights for components of this work owned by others than the author(s) must be honored. Abstracting with credit is permitted. To copy otherwise, or republish, to post on servers or to redistribute to lists, requires prior specific permission and/or a fee. Request permissions from permissions@acm.org." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 51, + 675, + 183, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 675, + 183, + 684 + ], + "spans": [ + { + "bbox": [ + 51, + 675, + 183, + 684 + ], + "type": "text", + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "spans": [ + { + "bbox": [ + 51, + 685, + 289, + 693 + ], + "type": "text", + "content": "© 2025 Copyright held by the owner/author(s). Publication rights licensed to ACM." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 51, + 693, + 180, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 693, + 180, + 700 + ], + "spans": [ + { + "bbox": [ + 51, + 693, + 180, + 700 + ], + "type": "text", + "content": "ACM ISBN 979-8-4007-1276-0/25/06...$15.00" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 51, + 700, + 167, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 700, + 167, + 709 + ], + "spans": [ + { + "bbox": [ + 51, + 700, + 167, + 709 + ], + "type": "text", + "content": "https://doi.org/10.1145/3696630.3728492" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 315, + 245, + 421, + 256 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 245, + 421, + 256 + ], + "spans": [ + { + "bbox": [ + 315, + 245, + 421, + 256 + ], + "type": "text", + "content": "1 INTRODUCTION" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 314, + 260, + 559, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 260, + 559, + 304 + ], + "spans": [ + { + "bbox": [ + 314, + 260, + 559, + 304 + ], + "type": "text", + "content": "The distributed databases, such as Google Spanner [2], Alibaba OceanBase [16], TiDB [5], and Apache IoTDB [14], have become integral components of cloud 
infrastructures, handling vast volumes of data [7, 25]." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 314, + 304, + 559, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 304, + 559, + 392 + ], + "spans": [ + { + "bbox": [ + 314, + 304, + 559, + 392 + ], + "type": "text", + "content": "However, these systems frequently encounter anomalies such as system failures and performance degradation, leading to significant financial losses. For example, Alibaba Cloud faces Intermittent Slow Queries (iSQs) [11], leading to billions of dollars in annual losses. Amazon reports that even a 0.1-second delay in loading caused by database anomalies can lead to a " + }, + { + "bbox": [ + 314, + 304, + 559, + 392 + ], + "type": "inline_equation", + "content": "1\\%" + }, + { + "bbox": [ + 314, + 304, + 559, + 392 + ], + "type": "text", + "content": " increase in financial losses [19]. Therefore, it is crucial to detect system failures in real time, analyze the root causes of these failures, and automatically remediate them." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 313, + 392, + 559, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 392, + 559, + 578 + ], + "spans": [ + { + "bbox": [ + 313, + 392, + 559, + 578 + ], + "type": "text", + "content": "System traces, metrics, and logs capture the states and critical events of active processes, making them essential for managing software failures. These data sources provide insights into both normal operations and deviations signaling potential failures. Leveraging their multimodal nature, recent research has enhanced anomaly detection and diagnosis in complex systems [6, 8-10, 17, 20-24, 26, 29, 30]. For example, Eadro [8] integrates anomaly detection and root cause localization using multi-source data, while AnoFusion [26] employs unsupervised multimodal failure detection in microservices. 
MSTGAD [6] combines all three data types with attentive multimodal learning for graph-based anomaly detection. Studies also explore two-source combinations, such as DeepTraLog [17], which pairs traces with logs using a GGNN-based model, and SCWarn [29], which identifies anomalies via heterogeneous metrics and logs. Although the effectiveness of these methods has been demonstrated in specific scenarios, they face several practical challenges when applied to distributed databases:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 331, + 588, + 559, + 710 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 331, + 588, + 559, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 588, + 559, + 666 + ], + "spans": [ + { + "bbox": [ + 331, + 588, + 559, + 666 + ], + "type": "text", + "content": "- Role Awareness. In distributed databases, nodes play different roles, each with varying levels of importance. Effective failure management often requires coordination and collaboration among multiple nodes. However, current methods completely overlook these roles, leading to suboptimal fault detection, inaccurate root cause diagnosis, and an inability to provide critical repair strategies." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 331, + 666, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 666, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 331, + 666, + 559, + 710 + ], + "type": "text", + "content": "- Application Limitations. Existing failure management models lack generalizability. For instance, these models are typically trained on specific systems and encounter concept drift when applied to new systems. 
Moreover, since these" + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 218, + 36, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 218, + 36, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 218, + 36, + 555 + ], + "type": "text", + "content": "arXiv:2504.06614v1 [cs.SE] 9 Apr 2025" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 76, + 85, + 294, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 85, + 294, + 128 + ], + "spans": [ + { + "bbox": [ + 76, + 85, + 294, + 128 + ], + "type": "text", + "content": "methods often frame the task as a classification problem, the resulting outputs lack interpretability, which is crucial for Operations and Control Engineers (OCEs) to effectively resolve issues." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 132, + 294, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 132, + 294, + 230 + ], + "spans": [ + { + "bbox": [ + 50, + 132, + 294, + 230 + ], + "type": "text", + "content": "To address the first challenge, we conduct a detailed analysis of the various roles within a distributed database and the entire failure management process. We ultimately identify three distinct role categories: system roles, data roles, and task roles. System roles represent the various roles inherent in the distributed database itself (e.g., leader nodes, follower nodes). Data roles refer to the different data sources involved in failure management, while task roles represent the various tasks that need to be executed during failure management." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 231, + 294, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 231, + 294, + 328 + ], + "spans": [ + { + "bbox": [ + 50, + 231, + 294, + 328 + ], + "type": "text", + "content": "To tackle the second challenge, we adopt a large language model (LLM)-based approach. While many existing LLM-based failure management solutions have been proposed, a significant number of them do not utilize the three types of data sources mentioned above [1, 3, 12, 18, 28]. Moreover, some approaches that do incorporate these data sources fail to account for the role-based structure within distributed databases [4, 13, 27]. Therefore, we propose a role-aware, LLM-driven multi-agent approach that integrates the characteristics of these roles and the three data sources." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 330, + 294, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 330, + 294, + 438 + ], + "spans": [ + { + "bbox": [ + 50, + 330, + 294, + 438 + ], + "type": "text", + "content": "Building on these insights, we introduce AgentFM, a comprehensive role-aware failure management framework for distributed databases, powered by LLM-driven multi-agent systems. AgentFM integrates the unique characteristics of distributed database roles with the rich multimodal data sources typically encountered in failure management, such as system traces, metrics, and logs. By employing a multi-agent architecture, AgentFM facilitates specialized agents for each role—system roles, data roles, and task roles—ensuring a more nuanced and effective approach to failure detection, diagnosis, and resolution." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 439, + 294, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 439, + 294, + 493 + ], + "spans": [ + { + "bbox": [ + 50, + 439, + 294, + 493 + ], + "type": "text", + "content": "We conduct preliminary experiments on Apache IoTDB [14], a distributed time-series database system, to assess the effectiveness of AgentFM in failure detection and root cause analysis. Furthermore, we manually verified the accuracy and validity of the generated mitigation solution." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 494, + 294, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 494, + 294, + 570 + ], + "spans": [ + { + "bbox": [ + 50, + 494, + 294, + 570 + ], + "type": "text", + "content": "Contributions. The contributions of this paper are threefold. First, we conduct a preliminary empirical study highlighting the varying significance of different roles in failure management. Second, we propose AgentFM, a role-aware failure management framework for distributed databases with LLM-driven multi-agents. Third, we provide a preliminary evaluation of AgentFM on Apache IoTDB, demonstrating its feasibility and effectiveness." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 582, + 244, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 582, + 244, + 593 + ], + "spans": [ + { + "bbox": [ + 51, + 582, + 244, + 593 + ], + "type": "text", + "content": "2 PRELIMINARY EMPIRCAL STUDY" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 596, + 294, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 596, + 294, + 629 + ], + "spans": [ + { + "bbox": [ + 50, + 596, + 294, + 629 + ], + "type": "text", + "content": "In this section, we conduct a preliminary empirical study using Apache IoTDB [14], focusing on the significance of different roles in the failure management process." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 51, + 640, + 144, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 640, + 144, + 652 + ], + "spans": [ + { + "bbox": [ + 51, + 640, + 144, + 652 + ], + "type": "text", + "content": "2.1 System Roles" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 654, + 294, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 654, + 294, + 709 + ], + "spans": [ + { + "bbox": [ + 50, + 654, + 294, + 709 + ], + "type": "text", + "content": "We manually injected anomalies (excessive data export) into Apache IoTDB during runtime and evaluate the anomaly detection performance (precision, recall and f1-score) for each node using PLELog [15], which is a state-of-the-art log-based anomaly detection method." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 320, + 104, + 553, + 160 + ], + "blocks": [ + { + "bbox": [ + 333, + 83, + 541, + 94 + ], + "lines": [ + { + "bbox": [ + 333, + 83, + 541, + 94 + ], + "spans": [ + { + "bbox": [ + 333, + 83, + 541, + 94 + ], + "type": "text", + "content": "Table 1: Anomaly Detection Results for Each Node" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 320, + 104, + 553, + 160 + ], + "lines": [ + { + "bbox": [ + 320, + 104, + 553, + 160 + ], + "spans": [ + { + "bbox": [ + 320, + 104, + 553, + 160 + ], + "type": "table", + "html": "
Node1Node2Node3Node4Node5Node6
p39.68%34.35%59.64%31.17%77.60%89.42%
r99.01%100.00%98.02%100.00%96.04%92.08%
f156.66%51.14%74.16%47.53%85.84%90.73%
", + "image_path": "1eaf2908e2b5e1be5916fea854d4db8f8c6eed97d6b2241d3eb2755794a30451.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 175, + 558, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 175, + 558, + 230 + ], + "spans": [ + { + "bbox": [ + 313, + 175, + 558, + 230 + ], + "type": "text", + "content": "As shown in Table 1, " + }, + { + "bbox": [ + 313, + 175, + 558, + 230 + ], + "type": "inline_equation", + "content": "\\text{Node}_6" + }, + { + "bbox": [ + 313, + 175, + 558, + 230 + ], + "type": "text", + "content": " achieves the best anomaly detection performance due to hosting the largest number of leader partitions. In contrast, the detection performance on other nodes is relatively suboptimal. This observation underscores the differing significance of various system roles." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 315, + 239, + 395, + 250 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 239, + 395, + 250 + ], + "spans": [ + { + "bbox": [ + 315, + 239, + 395, + 250 + ], + "type": "text", + "content": "2.2 Data Roles" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 254, + 558, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 254, + 558, + 308 + ], + "spans": [ + { + "bbox": [ + 313, + 254, + 558, + 308 + ], + "type": "text", + "content": "We further conduct anomaly diagnosis classification experiments on Apache IoTDB using both metrics data and log data. To ensure fairness, we do not adopt state-of-the-art methods; instead, we implement a simple classification algorithm based on the Dynamic Time Warping (DTW) algorithm." 
+ } + ] + } + ], + "index": 16 + }, + { + "type": "table", + "bbox": [ + 317, + 341, + 558, + 389 + ], + "blocks": [ + { + "bbox": [ + 315, + 319, + 558, + 330 + ], + "lines": [ + { + "bbox": [ + 315, + 319, + 558, + 330 + ], + "spans": [ + { + "bbox": [ + 315, + 319, + 558, + 330 + ], + "type": "text", + "content": "Table 2: Anomalies that can be Classified by Metrics and Logs" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 341, + 558, + 389 + ], + "lines": [ + { + "bbox": [ + 317, + 341, + 558, + 389 + ], + "spans": [ + { + "bbox": [ + 317, + 341, + 558, + 389 + ], + "type": "table", + "html": "
SourceCPUMemoryExportImportConfiguration
Metrics
Logs
", + "image_path": "4ab02666500b97395a6c8de3ce496b691bb00849e2af93ec40212bc1d38518a3.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 401, + 558, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 401, + 558, + 477 + ], + "spans": [ + { + "bbox": [ + 313, + 401, + 558, + 477 + ], + "type": "text", + "content": "As shown in Table 2, anomalies with an F1-score above " + }, + { + "bbox": [ + 313, + 401, + 558, + 477 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 313, + 401, + 558, + 477 + ], + "type": "text", + "content": " are considered identifiable by the corresponding data type. The results reveal that metrics are particularly effective at detecting resource anomalies, such as CPU and memory saturation, while logs excel at identifying internal database issues, including excessive data export/import and configuration errors. This observation highlights the distinct significance of different data roles." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 315, + 487, + 420, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 487, + 420, + 498 + ], + "spans": [ + { + "bbox": [ + 315, + 487, + 420, + 498 + ], + "type": "text", + "content": "3 METHODOLOGY" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 501, + 558, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 501, + 558, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 501, + 558, + 555 + ], + "type": "text", + "content": "Our preliminary empirical study highlights that the effectiveness of failure management varies significantly across different roles in distributed databases. Consequently, it becomes crucial to assign varying levels of importance to different roles at different times and adopt role-specific operations to optimize outcomes." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 556, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 556, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 313, + 556, + 559, + 710 + ], + "type": "text", + "content": "In this section, we introduce AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Figure 1 illustrates the architecture of AgentFM, which comprises three types of agents corresponding to distinct roles: system agents, data agents, and task agents. System agents represent the various roles of nodes within the distributed database, data agents handle information from diverse data sources, and task agents focus on executing specific failure management tasks. Notably, since system agents and data agents operate independently on each node, they are collectively referred to as standalone agents. Additionally, the framework incorporates a meta-agent, which is responsible for orchestrating, adapting, aggregating, and ultimately producing the final results of the agents' operations, ensuring cohesive and efficient failure management." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 52, + 60, + 186, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 60, + 186, + 68 + ], + "spans": [ + { + "bbox": [ + 52, + 60, + 186, + 68 + ], + "type": "text", + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 497, + 60, + 558, + 68 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 497, + 60, + 558, + 68 + ], + "spans": [ + { + "bbox": [ + 497, + 60, + 558, + 68 + ], + "type": "text", + "content": "Lingzhe Zhang et al." 
+ } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 52, + 83, + 558, + 319 + ], + "blocks": [ + { + "bbox": [ + 52, + 83, + 558, + 319 + ], + "lines": [ + { + "bbox": [ + 52, + 83, + 558, + 319 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 558, + 319 + ], + "type": "image", + "image_path": "7ef0973ddf67eba3d8f31da2d5321ac881ef3abb0947fe701af8f7be32c7d8d4.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 238, + 328, + 371, + 340 + ], + "lines": [ + { + "bbox": [ + 238, + 328, + 371, + 340 + ], + "spans": [ + { + "bbox": [ + 238, + 328, + 371, + 340 + ], + "type": "text", + "content": "Figure 1: AgentFM Architecture" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 357, + 295, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 357, + 295, + 445 + ], + "spans": [ + { + "bbox": [ + 50, + 357, + 295, + 445 + ], + "type": "text", + "content": "The framework operates by first extracting critical information from different system agents using the corresponding data agents. This information is then transmitted to a centralized meta-agent for unified decision-making. The decision-making process employs trace data as a cohesive framework to integrate key insights across nodes. Finally, the appropriate task agents execute the required failure management operations, completing the process efficiently and effectively." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 51, + 458, + 151, + 471 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 458, + 151, + 471 + ], + "spans": [ + { + "bbox": [ + 51, + 458, + 151, + 471 + ], + "type": "text", + "content": "3.1 System Agents" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 472, + 295, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 472, + 295, + 516 + ], + "spans": [ + { + "bbox": [ + 50, + 472, + 295, + 516 + ], + "type": "text", + "content": "Different distributed databases assign dynamic system roles that may change during runtime (e.g., leader re-election after node failure). To handle this, we designed an adaptive mechanism for system agents within the Meta Agent." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 517, + 295, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 517, + 295, + 571 + ], + "spans": [ + { + "bbox": [ + 50, + 517, + 295, + 571 + ], + "type": "text", + "content": "As illustrated in Figure 2, the core component is the System Role Manager, which initializes by analyzing configuration files and system docs to identify each node's role and importance. At runtime, it periodically queries the system state and updates roles accordingly." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 571, + 295, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 571, + 295, + 614 + ], + "spans": [ + { + "bbox": [ + 50, + 571, + 295, + 614 + ], + "type": "text", + "content": "Based on this process, each node instantiates a corresponding system agent—such as a Config Agent, Coordinator Agent, or Storage Agent—each embedding multiple data agents to handle specialized tasks." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 51, + 629, + 139, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 629, + 139, + 641 + ], + "spans": [ + { + "bbox": [ + 51, + 629, + 139, + 641 + ], + "type": "text", + "content": "3.2 Data Agents" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 643, + 294, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 643, + 294, + 677 + ], + "spans": [ + { + "bbox": [ + 50, + 643, + 294, + 677 + ], + "type": "text", + "content": "In this paper, we define two types of data agents: the Metric Agent and the Log Agent. Each adopts a distinct approach to extract key information from the raw metrics data and log data, respectively." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 677, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 677, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 677, + 294, + 710 + ], + "type": "text", + "content": "Metric Agent: The overall workflow of the Metric Agent begins with simple data preprocessing, which includes noise removal and imputation of missing values. 
After preprocessing, the multivariate" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 316, + 352, + 560, + 526 + ], + "blocks": [ + { + "bbox": [ + 316, + 352, + 560, + 526 + ], + "lines": [ + { + "bbox": [ + 316, + 352, + 560, + 526 + ], + "spans": [ + { + "bbox": [ + 316, + 352, + 560, + 526 + ], + "type": "image", + "image_path": "e799b8f839733eb02eb2359bbd4dcbe3748cb87fc8ec65c27cef3a711b389358.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 340, + 534, + 533, + 546 + ], + "lines": [ + { + "bbox": [ + 340, + 534, + 533, + 546 + ], + "spans": [ + { + "bbox": [ + 340, + 534, + 533, + 546 + ], + "type": "text", + "content": "Figure 2: System Agents Adaptation Workflow" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 563, + 559, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 563, + 559, + 608 + ], + "spans": [ + { + "bbox": [ + 313, + 563, + 559, + 608 + ], + "type": "text", + "content": "time-series data is converted into natural language descriptions using a large model. The generated natural language descriptions capture key aspects of the data, including time intervals, fluctuation trends, and anomaly points." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "spans": [ + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "text", + "content": "Formally, let " + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "inline_equation", + "content": "\\mathbf{M} = \\{m_1, m_2, \\dots, m_n\\}" + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "text", + "content": " represent the raw multidimensional metrics data, where each " + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "inline_equation", + "content": "m_i" + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "text", + "content": " corresponds to a time-series for a specific metric. The preprocessing step can be represented as Equation 1, where " + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "inline_equation", + "content": "\\mathbf{M}_{\\mathrm{p}}" + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "text", + "content": " is the denoised and imputed version of " + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 313, + 608, + 559, + 662 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 362, + 673, + 558, + 686 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 362, + 673, + 558, + 686 + ], + "spans": [ + { + "bbox": [ + 362, + 673, + 558, + 686 + ], + "type": "interline_equation", + "content": "\\mathbf {M} _ {\\mathrm {p}} = \\text {P r e p r o c e s s} (\\mathbf {M}) = \\left\\{m _ {1} ^ {\\prime}, m _ {2} ^ {\\prime}, \\dots , m _ {n} ^ {\\prime} \\right\\} \\tag {1}", + "image_path": "c356888d6eb6b4b08030830f723c247e588c021a4bfa2535dc850f753e59f561.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "content": "Next, the processed data is converted into natural language descriptions using a large language model " + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "content": ", as shown in Equation 2," + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 346, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 346, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 346, + 69 + ], + "type": "text", + "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 423, + 60, + 559, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 423, + 60, + 559, + 69 + ], + "spans": [ + { + "bbox": [ + 423, + 60, + 559, + 69 + ], + "type": "text", + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 50, 
+ 84, + 294, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "spans": [ + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "inline_equation", + "content": "\\mathbf{D}_{\\mathrm{nl}}" + }, + { + "bbox": [ + 50, + 84, + 294, + 118 + ], + "type": "text", + "content": " represents the resulting natural language description, which includes information on time intervals, trends, and identified anomalies in the data." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 145, + 129, + 294, + 140 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 129, + 294, + 140 + ], + "spans": [ + { + "bbox": [ + 145, + 129, + 294, + 140 + ], + "type": "interline_equation", + "content": "\\mathbf {D} _ {\\mathrm {n l}} = \\mathcal {L} \\left(\\mathbf {M} _ {\\mathrm {p}}\\right) \\tag {2}", + "image_path": "48afdf90edf72752714f78e53226a211faa8aea67c7d6ed209b1f4cbb1fcf1c6.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 142, + 294, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 142, + 294, + 196 + ], + "spans": [ + { + "bbox": [ + 50, + 142, + 294, + 196 + ], + "type": "text", + "content": "Log Agent: Unlike metrics data, log data is written by developers to monitor the internal state of the system, inherently containing rich semantic information. However, log data often includes substantial redundant information, such as repetitive log entries and messages unrelated to system anomalies." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 196, + 294, + 251 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 196, + 294, + 251 + ], + "spans": [ + { + "bbox": [ + 50, + 196, + 294, + 251 + ], + "type": "text", + "content": "To address this, the Log Agent comprises two main components: sequence compression and semantic compression. Sequence compression focuses on reducing the raw log sequence by consolidating repetitive log patterns, while semantic compression extracts key operational information from the logs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "spans": [ + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": "Sequence Compression: This component utilizes a log-parsing algorithm to transform each log entry into a distinct event template consisting of a static constant part and variable parameters. It then merges consecutive occurrences of identical event templates. 
Formally, for a given raw log sequence " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "L = \\{l_1, l_2, \\dots, l_N\\}" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "l_n" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": " represents an individual log entry, the parsing process transforms " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": " into " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "L_e = \\{e_1, e_2, \\dots, e_N\\}" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "e_i" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": " represents an event template. 
Consecutive identical events are merged, yielding " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "G' = \\{e_1', e_2', \\dots, e_C'\\}" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "C \\ll N" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "e_i' = e_i \\times c" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": " represents the event " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "e_i" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": " with a count " + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "inline_equation", + "content": "c" + }, + { + "bbox": [ + 50, + 251, + 294, + 361 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "spans": [ + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "text", + "content": "Semantic Compression: This component employs a prompt-based method to convert lengthy log sequences into descriptions of the database's ongoing operations. 
Formally, given a sequence of raw log entries " + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "inline_equation", + "content": "L = \\{l_1, l_2, \\dots, l_N\\}" + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "text", + "content": ", the LLM-based summarization transforms and compresses " + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "text", + "content": " into a smaller set of key operational elements " + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "inline_equation", + "content": "O = \\{o_1, o_2, \\dots, o_M\\}" + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "inline_equation", + "content": "M \\ll N" + }, + { + "bbox": [ + 50, + 361, + 296, + 427 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 51, + 436, + 140, + 448 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 436, + 140, + 448 + ], + "spans": [ + { + "bbox": [ + 51, + 436, + 140, + 448 + ], + "type": "text", + "content": "3.3 Task Agents" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 450, + 294, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 450, + 294, + 527 + ], + "spans": [ + { + "bbox": [ + 50, + 450, + 294, + 527 + ], + "type": "text", + "content": "After system agents collect necessary data via data agents, the meta agent coordinates task agents to carry out failure management using trace data. This process involves three agent types—detection, diagnosis, and mitigation—which operate sequentially. Detection agents identify anomalies in each time window; upon detection, diagnosis agents locate and classify the issue; finally, mitigation agents propose solutions based on the diagnosis." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 528, + 296, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 528, + 296, + 572 + ], + "spans": [ + { + "bbox": [ + 50, + 528, + 296, + 572 + ], + "type": "text", + "content": "Though prompts vary by task, all agents follow a common RAG+CoT approach, using historical data as labeled examples—normal/abnormal for detection, failure types for diagnosis—to guide reasoning during querying." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 51, + 580, + 219, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 580, + 219, + 592 + ], + "spans": [ + { + "bbox": [ + 51, + 580, + 219, + 592 + ], + "type": "text", + "content": "4 PRELIMINARY EVALUATION" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 51, + 596, + 113, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 596, + 113, + 608 + ], + "spans": [ + { + "bbox": [ + 51, + 596, + 113, + 608 + ], + "type": "text", + "content": "4.1 Design" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 50, + 610, + 294, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 610, + 294, + 686 + ], + "spans": [ + { + "bbox": [ + 50, + 610, + 294, + 686 + ], + "type": "text", + "content": "To evaluate AgentFM, we assess its feasibility and effectiveness in Apache IoTDB. We manually injected 10 types of anomalies, including CPU saturation, IO saturation, memory saturation, network delay increase, network bandwidth limitation, network partition occurrence, workload spikes, accompanying slow queries, excessive data export, and excessive data import. Each anomaly type is injected 20 times." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "spans": [ + { + "bbox": [ + 50, + 687, + 294, + 710 + ], + "type": "text", + "content": "The evaluation is conducted based on Qwen2.5-72b to assess the results of anomaly detection and diagnosis. The performance is" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 314, + 84, + 560, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 84, + 560, + 118 + ], + "spans": [ + { + "bbox": [ + 314, + 84, + 560, + 118 + ], + "type": "text", + "content": "measured using precision, recall, and F1-score metrics. Additionally, the mitigation solutions generated are manually analyzed to determine their usefulness." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 128, + 380, + 139 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 128, + 380, + 139 + ], + "spans": [ + { + "bbox": [ + 315, + 128, + 380, + 139 + ], + "type": "text", + "content": "4.2 Results" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 314, + 142, + 560, + 198 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 142, + 560, + 198 + ], + "spans": [ + { + "bbox": [ + 314, + 142, + 560, + 198 + ], + "type": "text", + "content": "We utilize the function call feature to guide the model in generating \"yes\" or \"no\" responses to evaluate anomaly detection performance. For failure diagnosis, we manually review the LLM-generated results to identify the classification most closely matching the pre-injected anomalies and assessed the outcomes accordingly." 
+ } + ] + } + ], + "index": 17 + }, + { + "type": "table", + "bbox": [ + 335, + 232, + 539, + 277 + ], + "blocks": [ + { + "bbox": [ + 337, + 210, + 536, + 221 + ], + "lines": [ + { + "bbox": [ + 337, + 210, + 536, + 221 + ], + "spans": [ + { + "bbox": [ + 337, + 210, + 536, + 221 + ], + "type": "text", + "content": "Table 3: Anomaly Detection & Diagnosis Results" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 335, + 232, + 539, + 277 + ], + "lines": [ + { + "bbox": [ + 335, + 232, + 539, + 277 + ], + "spans": [ + { + "bbox": [ + 335, + 232, + 539, + 277 + ], + "type": "table", + "html": "
TaskPrecisionRecallF1-Score
Anomaly Detection95.14%97.03%95.76%
Failure Diagnosis89.61%87.04%87.62%
", + "image_path": "82e7fce4ff67186806eecc16a65a5aafad2abe658c41fa7061b4b2c82185027f.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "table_body" + } + ], + "index": 19 + }, + { + "bbox": [ + 314, + 293, + 560, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 293, + 560, + 359 + ], + "spans": [ + { + "bbox": [ + 314, + 293, + 560, + 359 + ], + "type": "text", + "content": "As shown in Table 3, AgentFM demonstrates promising performance in our preliminary experiments. However, based on our observations, the model struggles to identify correct results when dealing with complex internal anomalies (e.g., excessive data import). We plan to focus on optimizing performance for such scenarios in future work." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 320, + 380, + 424, + 389 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 380, + 424, + 389 + ], + "spans": [ + { + "bbox": [ + 320, + 380, + 424, + 389 + ], + "type": "text", + "content": "Increase CPU Resources:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 320, + 391, + 554, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 391, + 554, + 422 + ], + "spans": [ + { + "bbox": [ + 320, + 391, + 554, + 422 + ], + "type": "text", + "content": "Iotdb0 frequently experiences high CPU usage, consider increasing the number of CPU cores allocated to the container or raising the CPU limit." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 320, + 424, + 389, + 434 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 424, + 389, + 434 + ], + "spans": [ + { + "bbox": [ + 320, + 424, + 389, + 434 + ], + "type": "text", + "content": "Load Balancing:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 320, + 435, + 553, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 435, + 553, + 456 + ], + "spans": [ + { + "bbox": [ + 320, + 435, + 553, + 456 + ], + "type": "text", + "content": "If iotdb0 is part of a cluster, consider distributing some of the load to other nodes to achieve load balancing." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 328, + 478, + 545, + 490 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 328, + 478, + 545, + 490 + ], + "spans": [ + { + "bbox": [ + 328, + 478, + 545, + 490 + ], + "type": "text", + "content": "Figure 3: Sample Mitigation Solutions from AgentFM" + } + ] + } + ], + "index": 25, + "type": "text" + }, + { + "bbox": [ + 314, + 506, + 560, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 506, + 560, + 562 + ], + "spans": [ + { + "bbox": [ + 314, + 506, + 560, + 562 + ], + "type": "text", + "content": "We also conduct a manual analysis of the mitigation solutions recommended by AgentFM. One example is illustrated in Figure 3. We extract the key information from the solution and find that these recommendations provide targeted and effective assistance in system remediation." 
+ } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 315, + 571, + 406, + 582 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 571, + 406, + 582 + ], + "spans": [ + { + "bbox": [ + 315, + 571, + 406, + 582 + ], + "type": "text", + "content": "5 CONCLUSION" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 314, + 586, + 560, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 586, + 560, + 662 + ], + "spans": [ + { + "bbox": [ + 314, + 586, + 560, + 662 + ], + "type": "text", + "content": "This paper investigates the varying significance of different roles in failure management. Building on this insight, we propose AgentFM, a role-aware failure management framework for distributed databases powered by LLM-driven multi-agents. Our preliminary experiments demonstrate the feasibility of AgentFM. In the future, we plan to further enhance AgentFM's capabilities in automating remediation execution." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 315, + 673, + 437, + 683 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 673, + 437, + 683 + ], + "spans": [ + { + "bbox": [ + 315, + 673, + 437, + 683 + ], + "type": "text", + "content": "ACKNOWLEDGEMENT" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "spans": [ + { + "bbox": [ + 314, + 687, + 559, + 710 + ], + "type": "text", + "content": "This work is supported by Key-Area Research and Development Program of Guangdong Province, China (NO.2020B010164003)." 
+ } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 188, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 188, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 188, + 69 + ], + "type": "text", + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 497, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 497, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 497, + 60, + 558, + 69 + ], + "type": "text", + "content": "Lingzhe Zhang et al." + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 52, + 83, + 124, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 52, + 83, + 124, + 95 + ], + "spans": [ + { + "bbox": [ + 52, + 83, + 124, + 95 + ], + "type": "text", + "content": "REFERENCES" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 53, + 97, + 294, + 576 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 56, + 97, + 294, + 129 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 97, + 294, + 129 + ], + "spans": [ + { + "bbox": [ + 56, + 97, + 294, + 129 + ], + "type": "text", + "content": "[1] Toufique Ahmed, Supriyo Ghosh, Chetan Bansal, Thomas Zimmermann, Xuchao Zhang, and Saravan Rajmohan. 2023. Recommending root-cause and mitigation steps for cloud incidents using large language models. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1737-1749." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 129, + 294, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 129, + 294, + 161 + ], + "spans": [ + { + "bbox": [ + 55, + 129, + 294, + 161 + ], + "type": "text", + "content": "[2] James C Corbett, Jeffrey Dean, Michael Epstein, Andrew Fikes, Christopher Frost, Jeffrey John Furman, Sanjay Ghemawat, Andrey Gubarev, Christopher Heiser, Peter Hochschild, et al. 2013. Spanner: Google's globally distributed database. ACM Transactions on Computer Systems (TOCS) 31, 3 (2013), 1-22." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 162, + 294, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 162, + 294, + 201 + ], + "spans": [ + { + "bbox": [ + 56, + 162, + 294, + 201 + ], + "type": "text", + "content": "[3] Drishti Goel, Fiza Husain, Aditya Singh, Supriyo Ghosh, Anjaly Parayil, Chetan Bansal, Xuchao Zhang, and Saravan Rajmohan. 2024. X-lifecycle learning for cloud incident management using llms. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 417-428." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 201, + 294, + 233 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 201, + 294, + 233 + ], + "spans": [ + { + "bbox": [ + 56, + 201, + 294, + 233 + ], + "type": "text", + "content": "[4] Adha Hrusto, Per Runeson, and Magnus C Ohlsson. 2024. Autonomous monitors for detecting failures early and reporting interpretable alerts in cloud operations. In Proceedings of the 46th International Conference on Software Engineering: Software Engineering in Practice. 47-57." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 233, + 294, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 233, + 294, + 258 + ], + "spans": [ + { + "bbox": [ + 56, + 233, + 294, + 258 + ], + "type": "text", + "content": "[5] Dongxu Huang, Qi Liu, Qiu Cui, Zhuhe Fang, Xiaoyu Ma, Fei Xu, Li Shen, Liu Tang, Yuxing Zhou, Menglong Huang, et al. 2020. TiDB: a Raft-based HTAP database. Proceedings of the VLDB Endowment 13, 12 (2020), 3072-3084." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 258, + 294, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 258, + 294, + 289 + ], + "spans": [ + { + "bbox": [ + 56, + 258, + 294, + 289 + ], + "type": "text", + "content": "[6] Jun Huang, Yang Yang, Hang Yu, Jianguo Li, and Xiao Zheng. 2023. Twin graph-based anomaly detection via attentive multi-modal learning for microservice system. In 2023 38th IEEE/ACM International Conference on Automated Software Engineering (ASE). IEEE, 66-78." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 289, + 294, + 321 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 289, + 294, + 321 + ], + "spans": [ + { + "bbox": [ + 56, + 289, + 294, + 321 + ], + "type": "text", + "content": "[7] Yuyuan Kang, Xiangdong Huang, Shaoxu Song, Lingzhe Zhang, Jialin Qiao, Chen Wang, Jianmin Wang, and Julian Feinauer. 2022. Separation or not: On handing out-of-order time-series data in leveled LSM-tree. In 2022 IEEE 38th International Conference on Data Engineering (ICDE). IEEE, 3340-3352." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 321, + 294, + 353 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 321, + 294, + 353 + ], + "spans": [ + { + "bbox": [ + 56, + 321, + 294, + 353 + ], + "type": "text", + "content": "[8] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, and Michael R Lyu. 2023. 
Eadro: An end-to-end troubleshooting framework for microservices on multi-source data. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1750-1762." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 353, + 294, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 353, + 294, + 385 + ], + "spans": [ + { + "bbox": [ + 56, + 353, + 294, + 385 + ], + "type": "text", + "content": "[9] Cheryl Lee, Tianyi Yang, Zhuangbin Chen, Yuxin Su, Yongqiang Yang, and Michael R Lyu. 2023. Heterogeneous anomaly detection for software systems via semi-supervised cross-modal attention. In 2023 IEEE/ACM 45th International Conference on Software Engineering (ICSE). IEEE, 1724-1736." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 53, + 385, + 294, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 385, + 294, + 415 + ], + "spans": [ + { + "bbox": [ + 53, + 385, + 294, + 415 + ], + "type": "text", + "content": "[10] Cheng-Ming Lin, Ching Chang, Wei-Yao Wang, Kuang-Da Wang, and Wen-Chih Peng. 2024. Root Cause Analysis in Microservice Using Neural Granger Causal Discovery. In Proceedings of the AAAI Conference on Artificial Intelligence, Vol. 38. 206-213." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 53, + 416, + 294, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 416, + 294, + 449 + ], + "spans": [ + { + "bbox": [ + 53, + 416, + 294, + 449 + ], + "type": "text", + "content": "[11] Minghua Ma, Zheng Yin, Shenglin Zhang, Sheng Wang, Christopher Zheng, Xinhao Jiang, Hanwen Hu, Cheng Luo, Yilin Li, Nengjun Qiu, et al. 2020. Diagnosing root causes of intermittent slow queries in cloud databases. Proceedings of the VLDB Endowment 13, 8 (2020), 1176-1189." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 53, + 449, + 294, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 449, + 294, + 480 + ], + "spans": [ + { + "bbox": [ + 53, + 449, + 294, + 480 + ], + "type": "text", + "content": "[12] Devjeet Roy, Xuchao Zhang, Rashi Bhave, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. Exploring IIm-based agents for root cause analysis. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering.* 208-219." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 53, + 480, + 294, + 520 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 480, + 294, + 520 + ], + "spans": [ + { + "bbox": [ + 53, + 480, + 294, + 520 + ], + "type": "text", + "content": "[13] Manish Shetty, Yinfang Chen, Gagan Somashekar, Minghua Ma, Yogesh Simmhan, Xuchao Zhang, Jonathan Mace, Dax Vandevoorde, Pedro Las-Casas, Shachee Mishra Gupta, et al. 2024. Building AI Agents for Autonomous Clouds: Challenges and Design Principles. In Proceedings of the 2024 ACM Symposium on Cloud Computing, 99-110." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 53, + 520, + 294, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 520, + 294, + 552 + ], + "spans": [ + { + "bbox": [ + 53, + 520, + 294, + 552 + ], + "type": "text", + "content": "[14] Chen Wang, Xiangdong Huang, Jialin Qiao, Tian Jiang, Lei Rui, Jinrui Zhang, Rong Kang, Julian Feinauer, Kevin A McGrail, Peng Wang, et al. 2020. Apache iotdb: time-series database for internet of things. Proceedings of the VLDB Endowment 13, 12 (2020), 2901-2904." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 53, + 552, + 294, + 576 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 53, + 552, + 294, + 576 + ], + "spans": [ + { + "bbox": [ + 53, + 552, + 294, + 576 + ], + "type": "text", + "content": "[15] Lin Yang, Junjie Chen, Zan Wang, Weijing Wang, Jiajun Jiang, Xuyuan Dong, and Wenbin Zhang. 2021. Semi-supervised log-based anomaly detection via probabilistic label estimation. In 2021 IEEE/ACM 43rd International Conference on" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 87, + 559, + 579 + ], + "type": "list", + "angle": 0, + "index": 35, + "blocks": [ + { + "bbox": [ + 331, + 87, + 463, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 331, + 87, + 463, + 95 + ], + "spans": [ + { + "bbox": [ + 331, + 87, + 463, + 95 + ], + "type": "text", + "content": "Software Engineering (ICSE). IEEE, 1448-1460." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 317, + 95, + 559, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 95, + 559, + 126 + ], + "spans": [ + { + "bbox": [ + 317, + 95, + 559, + 126 + ], + "type": "text", + "content": "[16] Zhenkun Yang, Chuanhui Yang, Fusheng Han, Mingqiang Zhuang, Bing Yang, Zhifeng Yang, Xiaojun Cheng, Yuzhong Zhao, Wenhui Shi, Huafeng Xi, et al. 2022. OceanBase: a 707 million tpmC distributed relational database system. Proceedings of the VLDB Endowment 15, 12 (2022), 3385-3397." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 317, + 127, + 559, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 127, + 559, + 159 + ], + "spans": [ + { + "bbox": [ + 317, + 127, + 559, + 159 + ], + "type": "text", + "content": "[17] Chenxi Zhang, Xin Peng, Chaofeng Sha, Ke Zhang, Zhenqing Fu, Xiya Wu, Qingwei Lin, and Dongmei Zhang. 2022. 
Deepralog: Trace-log combined microservice anomaly detection through graph-based deep learning. In Proceedings of the 44th international conference on software engineering. 623-634." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 317, + 159, + 559, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 159, + 559, + 198 + ], + "spans": [ + { + "bbox": [ + 317, + 159, + 559, + 198 + ], + "type": "text", + "content": "[18] Dylan Zhang, Xuchao Zhang, Chetan Bansal, Pedro Las-Casas, Rodrigo Fonseca, and Saravan Rajmohan. 2024. LM-PACE: Confidence estimation by large language models for effective root causing of cloud incidents. In *Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering*, 388-398." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 198, + 559, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 559, + 228 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 559, + 228 + ], + "type": "text", + "content": "[19] Lingzhe Zhang, Tong Jia, Mengxi Jia, Ying Li, Yong Yang, and Zhonghai Wu. 2024. Multivariate Log-based Anomaly Detection for Distributed Database. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining. 4256-4267." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 228, + 559, + 260 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 228, + 559, + 260 + ], + "spans": [ + { + "bbox": [ + 316, + 228, + 559, + 260 + ], + "type": "text", + "content": "[20] Lingzhe Zhang, Tong Jia, Mengxi Jia, Hongyi Liu, Yong Yang, Zhonghai Wu, and Ying Li. 2024. Towards Close-To-Zero Runtime Collection Overhead: Raft-Based Anomaly Diagnosis on System Faults for Distributed Storage System. IEEE Transactions on Services Computing (2024)." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 261, + 559, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 261, + 559, + 292 + ], + "spans": [ + { + "bbox": [ + 316, + 261, + 559, + 292 + ], + "type": "text", + "content": "[21] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Aiwei Liu, Yong Yang, Zhonghai Wu, Xuming Hu, Philip S Yu, and Ying Li. 2024. A Survey of AIOps for Failure Management in the Era of Large Language Models. arXiv preprint arXiv:2406.11213 (2024)." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 293, + 559, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 293, + 559, + 324 + ], + "spans": [ + { + "bbox": [ + 316, + 293, + 559, + 324 + ], + "type": "text", + "content": "[22] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. ScalaLog: Scalable Log-Based Failure Diagnosis Using LLM. In ICASSP 2025-2025 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP). IEEE, 1-5." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 324, + 559, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 324, + 559, + 356 + ], + "spans": [ + { + "bbox": [ + 316, + 324, + 559, + 356 + ], + "type": "text", + "content": "[23] Lingzhe Zhang, Tong Jia, Mengxi Jia, Yifan Wu, Hongyi Liu, and Ying Li. 2025. XRAGLog: A Resource-Efficient and Context-Aware Log-Based Anomaly Detection Method Using Retrieval-Augmented Generation. In AAAI 2025 Workshop on Preventing and Detecting LLM Misinformation (PDLM)." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 356, + 559, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 356, + 559, + 388 + ], + "spans": [ + { + "bbox": [ + 316, + 356, + 559, + 388 + ], + "type": "text", + "content": "[24] Lingzhe Zhang, Tong Jia, Kangjin Wang, Mengxi Jia, Yong Yang, and Ying Li. 2024. 
Reducing Events to Augment Log-based Anomaly Detection Models: An Empirical Study. In Proceedings of the 18th ACM/IEEE International Symposium on Empirical Software Engineering and Measurement. 538-548." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 388, + 559, + 420 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 388, + 559, + 420 + ], + "spans": [ + { + "bbox": [ + 316, + 388, + 559, + 420 + ], + "type": "text", + "content": "[25] Ling-Zhe Zhang, Xiang-Dong Huang, Yan-Kai Wang, Jia-Lin Qiao, Shao-Xu Song, and Jian-Min Wang. 2024. Time-tired compaction: An elastic compaction scheme for LSM-tree based time-series database. Advanced Engineering Informatics 59 (2024), 102224." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 420, + 559, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 420, + 559, + 452 + ], + "spans": [ + { + "bbox": [ + 316, + 420, + 559, + 452 + ], + "type": "text", + "content": "[26] Shenglin Zhang, Pengxiang Jin, Zihan Lin, Yongqian Sun, Bicheng Zhang, Sibo Xia, Zhengdan Li, Zhenyu Zhong, Minghua Ma, Wa Jin, et al. 2023. Robust failure diagnosis of microservice system through multimodal data. IEEE Transactions on Services Computing 16, 6 (2023), 3851-3864." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 316, + 452, + 559, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 452, + 559, + 483 + ], + "spans": [ + { + "bbox": [ + 316, + 452, + 559, + 483 + ], + "type": "text", + "content": "[27] Wei Zhang, Hongcheng Guo, Jian Yang, Yi Zhang, Chaoran Yan, Zhoujin Tian, Hangyuan Ji, Zhoujun Li, Tongliang Li, Tieqiao Zheng, et al. 2024. mABC: multi-Agent Blockchain-Inspired Collaboration for root cause analysis in micro-services architecture. arXiv preprint arXiv:2404.12135 (2024)." 
+ } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 317, + 483, + 559, + 516 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 483, + 559, + 516 + ], + "spans": [ + { + "bbox": [ + 317, + 483, + 559, + 516 + ], + "type": "text", + "content": "[28] Xuchao Zhang, Supriyo Ghosh, Chetan Bansal, Rujia Wang, Minghua Ma, Yu Kang, and Saravan Rajmohan. 2024. Automated root causing of cloud incidents using in-context learning with gpt-4. In Companion Proceedings of the 32nd ACM International Conference on the Foundations of Software Engineering. 266-277." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 317, + 516, + 559, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 516, + 559, + 555 + ], + "spans": [ + { + "bbox": [ + 317, + 516, + 559, + 555 + ], + "type": "text", + "content": "[29] Nengwen Zhao, Junjie Chen, Zhaoyang Yu, Honglin Wang, Jiesong Li, Bin Qiu, Hongyu Xu, Wenchi Zhang, Kaixin Sui, and Dan Pei. 2021. Identifying bad software changes via multimodal anomaly detection for online service systems. In Proceedings of the 29th ACM Joint Meeting on European Software Engineering Conference and Symposium on the Foundations of Software Engineering. 527-539." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 317, + 555, + 559, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 555, + 559, + 579 + ], + "spans": [ + { + "bbox": [ + 317, + 555, + 559, + 579 + ], + "type": "text", + "content": "[30] Lecheng Zheng, Zhengzhang Chen, Jingrui He, and Haifeng Chen. 2024. Multimodal Causal Structure Learning and Root Cause Analysis. arXiv preprint arXiv:2402.02357 (2024)." 
+ } + ] + } + ], + "index": 34 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 51, + 60, + 345, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 51, + 60, + 345, + 69 + ], + "spans": [ + { + "bbox": [ + 51, + 60, + 345, + 69 + ], + "type": "text", + "content": "AgentFM: Role-Aware Failure Management for Distributed Databases with LLM-Driven Multi-Agents" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 424, + 60, + 558, + 69 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 424, + 60, + 558, + 69 + ], + "spans": [ + { + "bbox": [ + 424, + 60, + 558, + 69 + ], + "type": "text", + "content": "FSE '25, June 23-28, 2025, Trondheim, Norway" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_content_list.json b/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a4579926507821c5cc1f9aaae28ea78827a555e9 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_content_list.json @@ -0,0 +1,2398 @@ +[ + { + "type": "text", + "text": "Inducing Programmatic Skills for Agentic Tasks", + "text_level": 1, + "bbox": [ + 171, + 99, + 692, + 122 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zora Zhiruo Wang Apurva Gandhi Graham Neubig Daniel Fried", + "bbox": [ + 179, + 146, + 715, + 162 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Carnegie Mellon University", + "bbox": [ + 183, + 162, + 387, + 176 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zhiruow,apurvag,gneubig,dfried}@cs.cmu.edu", + "bbox": [ + 183, + 176, + 537, + 190 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + 
"bbox": [ + 457, + 210, + 540, + 227 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To succeed in common digital tasks such as web navigation, agents must carry out a variety of specialized tasks such as searching for products or planning a travel route. To tackle these tasks, agents can bootstrap themselves by learning task-specific skills online through interaction with the web environment. In this work, we demonstrate that programs are an effective representation for skills. We propose agent skill induction (ASI), which allows agents to adapt themselves by inducing, verifying, and utilizing program-based skills on the fly. We start with an evaluation on the WebArena agent benchmark and show that ASI outperforms the static baseline agent and its text-skill counterpart by $23.5\\%$ and $11.3\\%$ in success rate, mainly thanks to the programmatic verification guarantee during the induction phase. ASI also improves efficiency by reducing $10.7 - 15.3\\%$ of the steps over baselines, by composing primitive actions (e.g., click) into higher-level skills (e.g., search_product). We then highlight the efficacy of ASI in remaining efficient and accurate under scaled-up web activities. Finally, we examine the generalizability of induced skills when transferring between websites, and find that ASI can effectively reuse common skills, while also updating incompatible skills to versatile website changes. $^{1}$", + "bbox": [ + 228, + 243, + 769, + 498 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 522, + 320, + 537 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To achieve success in common digital tasks such as web navigation, it is essential for agents to be able to perform a variety of specialized tasks such as searching for products on a shopping website (Yao et al., 2022; Deng et al., 2024) or finding a driving route on the map (Zhou et al., 2024a; Xie et al., 2024a). 
While one source for agents to learn such tasks is demonstrations annotated by humans (Deng et al., 2024) or synthesized with large language models (LMs) on websites of interest (Murty et al., 2024b;a), this can be a challenging offline learning procedure given the broad range of website domains and functionalities, especially for the collected demonstrations to match or cover the distribution of tasks queried at inference time (Zhou et al., 2024b); not to mention the limitations in resources to collect abundant high-quality data at ease (Pan et al., 2024).", + "bbox": [ + 169, + 550, + 826, + 691 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Instead of learning from demonstrations offline, an alternative way is to learn these tasks directly online from test queries to prevent potential distribution mismatch between demonstration and downstream tasks (Levine et al., 2020). Some works propose to have agents induce casual abstractions (Majumder et al., 2024), single-state guidelines (Fu et al., 2024), or multi-step procedural workflows (Sarch et al., 2024; Wang et al., 2024b) as a form of intermediate knowledge to augment agent memory via non-parametric approaches (Brown et al., 2020). Nonetheless, most existing approaches represent this knowledge in text, offering limited quality and verification guarantees. In this work, we propose that executable programs are effective representations for intermediate skill acquisition, given their verifiability and composability advantages (Setlur et al., 2025).", + "bbox": [ + 169, + 696, + 828, + 837 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We present ASI, namely agent skill induction (§2), that induces and applies programmatic skills along the process of solving user web navigation queries. More concretely, given a natural language (NL) query, the agent first generates an action trajectory attempting to solve the task using built-in, primitive actions such as click and scroll. 
The agent then", + "bbox": [ + 169, + 842, + 826, + 902 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1https://github.com/zorazrw/agent-skill-induction", + "bbox": [ + 189, + 907, + 540, + 925 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06821v2 [cs.CL] 29 Aug 2025", + "bbox": [ + 22, + 275, + 60, + 724 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "induces higher-level skills (e.g., search_product(name)) that wrap primitive actions or prior skills as executable programs, accompanied with corresponding test trajectories to verify their quality. Verified skills are then incorporated into the agent action space and can be directly called to solve future tasks with similar procedures, as depicted in Figure 1 (bottom).", + "bbox": [ + 169, + 103, + 826, + 161 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We first evaluate ASI on the WebArena benchmark (Zhou et al., 2024a) (§3) and demonstrate that our online, adaptive ASI surpasses its static agent baseline without adaptive components by $23.5\\%$ in success rate. To validate the advantage of using programmatic representations for skills, we further compare to an adaptive agent, AWM (Wang et al., 2024b), that represents skills in memory as non-executable texts (Figure 1 top); we find ASI scores $11.3\\%$ higher success rate by employing verifiable, programmatic skills (Figure 1 bottom). 
Beyond the correctness aspect, the task-solving procedures by ASI-supported agents are $10.7 - 15.3\\%$ more efficient than the baseline approaches, mainly because of the action space abstraction and composition enabled by the programmatic skill representation.", + "bbox": [ + 169, + 166, + 455, + 459 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e184163a935c302d2edc322fe07a755bc00b82f9292ce321df60b7e979ab1931.jpg", + "image_caption": [ + "Figure 1: Online adaptive agent that induces and reuses programmatic skills as actions (bottom), as opposed to adding textual skills in memory (top)." + ], + "image_footnote": [], + "bbox": [ + 464, + 167, + 821, + 402 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We further stress test ASI on scaled-up scenarios (S4) that require substantially longer-horizon trajectories to complete the task. Across various domains such as shopping and social forums, we find the efficiency advantage offered by ASI is more pronounced, reducing action trajectories by 9.5 and 5.6 average steps compared to static and text-form agents. Facilitated by this improved procedural efficiency and planning, we find that ASI agent adheres to the optimal trajectory better and achieves tasks $20.7 - 38.9\\%$ more correctly.", + "bbox": [ + 169, + 464, + 826, + 551 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Finally, we study agent behaviors on generalizing induced skills to other websites (§5), particularly from sandboxed, simulated websites to real-world websites of similar domains. While ASI agents effectively transfer common skills (e.g., search_product) to new websites, some skills may be incompatible with alternative website designs hence less useful. 
Nonetheless, ASI can quickly refine its prior skills or create new skills on new websites, indicating it allows agents to adapt online while maintaining verifiability via programs.", + "bbox": [ + 169, + 555, + 828, + 642 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In short, ASI enhances web agent success and efficiency by inducing and applying verifiable programmatic skills, in general and longer-horizon tasks, even across varied websites.", + "bbox": [ + 169, + 645, + 823, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Agent Skill Induction", + "text_level": 1, + "bbox": [ + 171, + 694, + 401, + 713 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this section, we first lay out the web agent problem setup (§2.1) and introduce online, self-adaptive agents (§2.2). We then describe the core component of ASI—programmatic skill induction and verification (§2.3).", + "bbox": [ + 169, + 722, + 826, + 766 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Problem Statement: Online Adaptive Agent", + "text_level": 1, + "bbox": [ + 169, + 782, + 540, + 799 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "For the scope of this work, we focus on language model (LM) based agents, where each agent policy consists of an LM backbone $\\mathcal{L}$ , a memory $\\mathcal{M}$ , and a skill library $\\mathcal{A}$ , as illustrated in Figure 1 top and bottom. In the implementation, the memory $\\mathcal{M}$ and the skill library $\\mathcal{A}$ are provided as input context to the LM backbone. We denote the agent policy as $\\pi_{\\mathcal{L}}(\\cdot|\\mathcal{M},\\mathcal{A})$ and $\\pi_{\\mathcal{L}}$ for short. 
We focus on the web browser environment defined by a transition function $\\mathcal{T}(s'|s,a)$ that models the change in the webpage after an action.", + "bbox": [ + 169, + 804, + 823, + 891 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We focus on an online adaptation scenario where we have access to a sequence of NL queries $Q = \\{q_{1}, q_{2}, \\dots, q_{N}\\}$ specifying the tasks, and no other information such as demonstration", + "bbox": [ + 169, + 895, + 823, + 926 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "trajectories or ground-truth rewards are available (Wang et al., 2024a,b). For each task specified by a natural language (NL) query $q$ , the agent generates a trajectory of actions $\\tau = (s_0, a_0, s_1, a_1, \\dots, s_{H-1}, a_{H-1}, s_H)$ for a finite number of $H$ steps. At each time step $h$ in the horizon, the agent receives observation $o_h$ from the current state $s_h$ , and generates an action $a_h \\in \\mathcal{A}$ based on the observations and actions so far, via $\\pi_{\\mathcal{L}}(o_{0:h}, a_{0:h-1}; \\mathcal{M}, \\mathcal{A}) \\to a_h$ . The generated action will be executed on the environment and incurs a state change $\\mathcal{T}(s_h, a_h) \\to s_{h+1}$ . This observe-act loop continues for $H$ steps until the task reaches a task-terminating condition, such as the agent generating a termination action (e.g., send msg_to_user) or the horizon reaches a pre-determined maximum number of steps $h = H_{max}$ . We denote each pair of query and trajectory $(q, \\tau) := e$ as an episode $e$ . 
Agents can update the content in $\\mathcal{M}$ and $\\mathcal{A}$ and reuse them across episodes.", + "bbox": [ + 169, + 103, + 826, + 257 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Inducing Reusable Skills", + "text_level": 1, + "bbox": [ + 171, + 276, + 401, + 291 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To realize online adaptive agents, one common approach is to induce skills from correct trajectories to update the agent (Wang et al., 2024b). But since ground-truth rewards are unavailable, an LLM-based evaluator $V_{\\mathcal{L}}$ is often used to judge the correctness of episodes. Formally, from the total of $N$ episodes throughout the online process $\\{e^1,\\dots ,e^N\\} \\coloneqq \\mathcal{E}$ , we employ an LM-based evaluator $V_{\\mathcal{L}}(e)\\to 0 / 1$ to filter out the episodes predicted as correct $\\mathcal{E}_V = \\{e_i\\in \\mathcal{E}|V_{\\mathcal{L}}(e_i) = 1,i\\in \\{1,\\dots ,N\\} \\}$ and perform skill induction only on $\\mathcal{E}_V$ .", + "bbox": [ + 169, + 299, + 823, + 387 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Central to our adaptive agents is an induction component $I$ that enables the adaptivity of agents, which can be rule-based (Ellis et al., 2023; Grand et al., 2024) or instantiated by an LM $I(\\cdot |LM)$ (Wang et al., 2024b); we follow the latter for its better performance and use $I$ to represent the module for simplicity. For online adaptive agents $\\pi_{\\mathcal{L}}$ , to induce skills, $I$ is instructed to take in one filtered episode $e$ and output one or more pieces of desired skills $D = \\{d\\}$ , denoted as $I(e)\\to \\mathcal{D}$ . 
Following AWM (Wang et al., 2024b), we update the agent in non-parametric ways that incorporate the induction outcome $I(e_t)\\rightarrow d_t$ into the agent, instead of updating the parameters of the underlying LM backbone $\\mathcal{L}$ for agent policy $\\pi_{\\mathcal{L}}$ .", + "bbox": [ + 169, + 392, + 826, + 506 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Unlike AWM which represents skills in free-form text representations and can only augment agent memory via $\\mathcal{M}_t \\cup \\{d_t\\} \\to \\mathcal{M}_{t+1}$ (Figure 1 top), we introduce ASI that represents skills as executable python programs, and directly integrate skills into the agent action space instead, via $\\mathcal{A}_t \\cup \\{d_t\\} \\to \\mathcal{A}_{t+1}$ (Figure 1 bottom).", + "bbox": [ + 169, + 510, + 823, + 568 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 Inducing and Verifying Programmatic Skills", + "text_level": 1, + "bbox": [ + 169, + 585, + 542, + 602 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To improve the induction quality, we propose a change in representation from free-form text to executable programs, which offers advantages in correctness and efficiency. For one, the program format enables ready verification on skill correctness by executing them; for another, skill programs abstract multiple lower-level actions into a higher-level function call, thus agents can solve tasks in fewer steps without tackling tricky low-level details.", + "bbox": [ + 169, + 609, + 823, + 680 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Inducing Programmatic Skills We first clean the input episodes to ensure the induction quality. We remove all the steps that cause execution errors such as invalid argument format, to keep these invalid actions from distracting agent predictions. 
Furthermore, noticing the long and possibly redundant thought process generated by agents along with each action, we simplify each thought text paragraph into a short one-sentence description (e.g., \"Clicked the directions button to access the route planning feature\") using LM, effectively reducing the thought content from 87.9 to 13.4 tokens per step.", + "bbox": [ + 169, + 686, + 826, + 785 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Given a clean input episode $e$ , we now prompt the induction module $I$ to produce one or more program functions to represent reusable skills $\\mathcal{D} = \\{d\\}$ as executable programs. As exemplified in Figure 2, given the input episode on the left side, the induction module first produces two skills open网络营销reviews() and searchreviews_search_box_id, search_button_id, search_term) in the form of callable program functions.", + "bbox": [ + 169, + 790, + 826, + 862 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Skill Verification With the programmatic nature of ASI's skills, we can readily verify their correctness by executing them and checking if tasks can be solved successfully. While a naive way is to query the agent with the same NL query and allow it to use newly induced skill actions, we find agents may not always use new skills due to the large search space of", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg", + "image_caption": [ + "Figure 2: Inducing programmatic skills and rewriting the trajectory from an episode." 
+ ], + "image_footnote": [], + "bbox": [ + 194, + 85, + 803, + 229 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "possible action trajectories. To have agents more efficiently generate trajectories that test skills in a more targeted way, we curate a rewritten trajectory prefix $\\tau_{D}$ to constrain the first few steps executed in the environment, by rewriting and truncating the input action trajectory $\\tau$ , and subsequently asking the agent to complete the prefix to get a full, checkable trajectory $\\tau_{f}$ . Concretely, we first take the original action trajectory in the input episode $\\tau$ (consisting of primitive actions or previously learned skills), and ask the induction module $I$ to transform it to a skill-using trajectory (Figure 2 bottom right), by replacing sub-trajectories in $\\tau$ with calls to the newly induced skill programs $\\mathcal{D}$ , if possible. Zooming into the Figure 2 example, this procedure merges click('Marketing') $\\rightarrow$ click('All Reviews') to an open_marketreviews() call; transforms fill(757, 'satisfied') $\\rightarrow$ click('Search') to a call of the second skill searchreviews('satisfied') with the specified term 'satisfied'; and adopted the last send msg_to_user('2') step directly. Note that we follow Wang et al. (2024b) and induce skills according to each website, so some skills could be tailored to particular webpage contexts such as the 'Marketing' and 'All Reviews' link constants in open_marketreviews, while other skills apply to more versatile setups such as searching for different reviews in searchreviews.", + "bbox": [ + 169, + 268, + 826, + 494 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Next, to avoid spurious successes in skill verification, we truncate the trajectory yielded above by removing any trailing primitive actions after the last call to a skill program. 
Taking Figure 2 as an example, in the original input trajectory, the last send msg_to_user('2') already sends over the correct answer '2' to the user. However, if we directly adopt this last step into the skill-using trajectory $\\tau_{D}$ , then executing it will always return the correct message to the user, regardless of whether the previous skill calls are valid. We thus remove such trailing actions to make sure verification attends to the induced skills we are testing.", + "bbox": [ + 169, + 500, + 823, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After rewriting and truncation, we get this skill-using trajectory $\\tau_{D}$ as the prefix to test skills. We now query the agent $\\pi$ again with the same NL query $q$ and first execute $\\tau_{D}$ on the environment. We then allow agents to continue generating up to $H_{max} - |\\tau_{D}|$ actions to finish the task. In the Figure 2 example, to successfully solve query $q$ , we expect the agent to generate another step of send msg_to_user(...). with the correct answer '2' in the message. We take the concatenation of the trajectory prefix $\\tau_{D}$ and the later additionally produced new steps $\\tau_{A}$ (e.g., [send msg_to_user('2')]) as the full trajectory $\\tau_{f}$ . We then decide whether to add the induced programs $\\mathcal{D}$ into the agent skill library as $\\mathcal{A}$ by examining $\\tau_{f}$ .", + "bbox": [ + 169, + 604, + 826, + 720 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Specifically, we check $\\tau_{f}$ from three dimensions: (1) Correctness: if executing $\\tau_{f}$ successfully solves the task $q$ as judged by the neural model evaluator $V_{\\mathcal{L}}$ ; (2) Skill Usage: if the trajectory contains at least one call to at least one new skill in $\\mathcal{D}$ ; and (3) Skill Validity: if all skill-calling actions cause environment changes. 
If all three boxes are checked, we add the skills being called in the trajectory $\\tau_{f}$ to the agent skill library $\\mathcal{A}_{t} \\cup \\mathcal{D}_{\\text{called}} \\rightarrow \\mathcal{A}_{t+1}$ . By adding $\\mathcal{D}_{\\text{called}}$ , the agent can now generate actions that call these skill programs to solve subsequent tasks.", + "bbox": [ + 169, + 726, + 826, + 816 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 General Web Navigation Performance", + "text_level": 1, + "bbox": [ + 169, + 835, + 545, + 854 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Experiment Setup", + "text_level": 1, + "bbox": [ + 171, + 869, + 346, + 883 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Benchmark and Evaluation To evaluate ASI on general web navigation scenarios, we adopt the WebArena benchmark (Zhou et al., 2024a) that contains 812 test examples covering", + "bbox": [ + 169, + 895, + 825, + 926 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/4f88725b8dc9876aae1dde5be6e27efcb666ca61c19b6ac75e2549b3303cabd2.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelMethod# StepsSRShopAdminRedditGitLabMapsMulti
GPTVanilla-12.313.910.46.615.015.68.3
AWM5.935.532.129.154.735.042.218.8
ClaudeVanilla5.632.732.636.836.826.138.520.8
AWM5.936.334.839.051.928.939.418.8
ASI (ours)5.040.440.144.054.732.243.120.8
", + "bbox": [ + 178, + 89, + 816, + 188 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: WebArena success rate by adaptive agents with programmatic skills, in comparison to a static vanilla agent baseline, and a text-skill learning adaptive agent.", + "bbox": [ + 169, + 196, + 823, + 226 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "five major web activity domains: e-commerce, social forum, software development, content management, and travel. Each example in WebArena has an NL query $q$ for the task, and a program-based evaluator that provides a binary $0/1$ score for any given trajectory $\\tau$ to judge if it successfully solves the task $q$ . This program-based evaluator enables relatively rigorous evaluation based on the functional correctness of the action trajectory. We report the average score across all WebArena examples, if not specified otherwise.", + "bbox": [ + 169, + 241, + 826, + 325 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Backbone LM and Agent Architecture We use the top-performing claude-3.5-sonnet model as the LM backbone for all components, including the agent policy $\\pi$ , the neural evaluator $V$ , and the skill induction modules $I$ . For experimentation, we use the BrowserGym (Chezelles et al., 2024) framework, which takes the webpage accessibility tree as observation, and instantiates the skill library $\\mathcal{A}$ with the WebArena default action space listed in $\\S A$ .", + "bbox": [ + 169, + 332, + 826, + 404 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Baselines We take the vanilla Claude model with the BrowserGym framework (Drouin et al., 2024) as the non-adaptive agent baseline. Additionally, we compare ASI to AWM (Wang et al., 2024b), the current top-performing online adaptive web agent method. 
Because AWM was originally developed with the gpt-4o model, for a fairer comparison, we also experiment with AWM with claude-3.5-sonnet model as its LM backbone and also apply the episode cleaning procedure to enhance induction quality. We compare the two baseline methods with our ASI approach. We provide the complete prompts for each agent component: task-solving, episode evaluation, episode cleaning, and skill induction, in $\\S A$ .",
Refer to $\\S B$ for more analysis.", + "bbox": [ + 169, + 565, + 826, + 691 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Why are Programmatic Skills Better?", + "text_level": 1, + "bbox": [ + 171, + 709, + 486, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To more concretely answer why programmatic skills are more effective than textual skills, we take a closer look on the two main differences between AWM and ASI: [1] whether the induction outcome is verified via execution, and [2] whether the induced skills are provided in memory for reference purpose only, or in the action space that allows execution.", + "bbox": [ + 169, + 734, + 826, + 792 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Better Induction Quality We take the shopping website as a representative, and analyze the textual and program skills induced by AWM and ASI agents. We group textual and program skills by their functionality and show one representative example in Table 2. 
Compared to the clear functional boundary and highly-reusable granularity of the search_product skill, we find that the textual skills often have (1) more redundant steps, (2) example-specific context: e.g., the last text skill aims to find 'game accessories' while the steps generally apply to any product, and (3) fuzzier boundaries between separable tasks, e.g., the first skill mixes product-search and add-to-wishlist procedures, thus may not offer optimal guidance when asked to, e.g., search product and add it to cart instead.", + "bbox": [ + 169, + 799, + 826, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/495de9455d437d5fb7075c1e7ad702e4817af15dbe99fd41fe407351a3037267.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Programmatic SkillsTextual Skills
def search_product_search_box_id: str, query: str):\n ""Search for a product using the search box.\n Args:\n search_box_id: ID of the search input field\n query: Search query string to enter\n Returns:\n None\nExamples:\n search_product('595', 'sony bluetooth headphones')\n ""\nclick_search_box_id)\nfill_search_box_id, query)\nkeyboard_press('Enter'))Task: Search for a product and add it to wish list\nAction Trajectory:\n __fill(621, {product_name})__ # Enter the product\nname in the search box\n __click(478)__ # Click the search button to execute the\nsearch\n __click({product_link})__ # Click the product to check\nmore details\n __click(1769)__ # Click the "Add to Wish List" link
Task: Search for a product's price range in the store\nAction Trajectory:\n __fill(565, {product_name})__ # Enter the product\nname in the search box\n __click(570)__ # Click the search button to execute the\nsearch\n __noop(1000)__ # Wait for search results to load\n __send msg_to_user({price_range_info})__ # \nAnalyze and report the price range findings from the\nsearch results
Task: Search for gaming accessories within a date range\nAction Trajectory:\n __click(1274)__ # Navigate to the Video Games category\n __fill(473, {search_terms})__ # Enter search terms\nincluding product name and year\n __click(478)__ # Execute the search
", + "bbox": [ + 173, + 88, + 823, + 392 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Verified Induction Improves End Success Rate From qualitative examination of the induction outcomes, we find roughly similar numbers of episodes evaluated as correct and used for induction (70 and 58 examples for AWM and ASI), ASI produced programs pass verification for only $15.6\\%$ of the turns, whereas AWM adds new skills for $31.4\\%$ of the time (replace or add none otherwise). While skill usage (in memory or as action, [2]) is designated for AWM and ASI, we hypothesize that verification [1] affects induction quality and thus end success. We thus experiment with another setting that induces programs (such that verification is enabled), and only use the induced skills in memory, to study the importance of induction quality. As shown in Table 3, inducing skills with execution-based verification (i.e., (unverified, text) $\\rightarrow$ (verified, program)), while always present skills in memory, improves end success rate by 4.2 points, indicating the importance of higher-quality induction via verification. Yet it is still 3.7 points lower than ASI, suggesting the incompatibility of program format to agent memory. Indeed, we observe many cases where the agent tries to call the skill programs but unsuccessfully, since they are not supported in the action space.", + "bbox": [ + 169, + 434, + 826, + 632 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Textual Representations Suit Memory Better To prevent the agent from trying to call these plausible programs, we ablate another setting that transforms program skills to textual format (as Table 2 right) and provide them in agent memory, dubbed (verified, text). This format transformation effectively improves the overall success rate by another 2.6 points, getting a little closer to ASI. 
Given the different downstream usage,", + "bbox": [ + 171, + 637, + 485, + 775 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/fb0d1499b20375bbf7fa5c5dc65ac574e4f19e2c831ae86c1eae513719edaffe.jpg", + "table_caption": [ + "Table 2: Example textual and program skills induced on the shopping website." + ], + "table_footnote": [], + "table_body": "
MethodSR
Add to Memoryunverified, text32.6
verified, program36.4
verified, text39.0
Add as Actionsverified, program40.1
", + "bbox": [ + 496, + 641, + 821, + 732 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3: Ablation study on induction verification and format on the shopping website.", + "bbox": [ + 493, + 736, + 826, + 766 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "i.e., memory or actuation, textual and program formats may suit individual scenarios better.", + "bbox": [ + 169, + 776, + 826, + 791 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Beyond basic web navigation tasks, in the next two sections, we examine agents in two other important scenarios, scaled-up activities (§4) and cross-website generalization (§5).", + "bbox": [ + 169, + 796, + 825, + 825 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Scaled-Up Browsing Activities", + "text_level": 1, + "bbox": [ + 169, + 848, + 480, + 867 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The WebArena benchmark mainly features isolated, single-task scenarios, such as adding a single product to the shopping cart. However, in real-world practices, people need to do a series of such tasks together, such as adding multiple related products (e.g., coffee and", + "bbox": [ + 169, + 881, + 825, + 926 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "mug) to the cart before finally checking out. This browsing request can lead to extremely long-horizon tasks, sometimes with repetitive intermediate procedures. 
We identify this to be a scenario to further demonstrate the efficacy of program skills, as opposed to textual skills, as programs lend themselves naturally to repeated invocation and composition.", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Therefore, we curate several case scenarios where the user asks for action-dense instructions, such as the tasks listed in Figure 3. Because the tasks are long-horizon and involve multiple sub-tasks, we follow Xu et al. (2024) and set up intermediate checkpoints to better track the intermediate progress of agents. Refer to SC.1 to see the full list of tasks and their evaluation checkpoints. We measure the success rate of each example by the percentage of checkpoints achieved by the agent. We report the average success rate of all examples, as well as the average number of steps taken to solve the tasks, in Table 4.", + "bbox": [ + 169, + 166, + 826, + 265 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/1148f878af9686e2ea67d104ffeb29c5fefb949586578f2979b3fc083583026f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodShoppingAdminRedditGitLabMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
VANILLA41.723.558.020.833.323.033.340.040.015.2
AWM68.321.574.018.240.016.850.033.865.012.6
ASI (ours)100.016.391.014.255.012.855.025.4100.06.2
", + "bbox": [ + 174, + 273, + 823, + 347 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 4: Performance of vanilla, AWM, and ASI agents in scaled-up browsing scenarios. We perform statistical testing between ASI and each baseline and verify all improvements are statistically significant with t-statistics $|t| > 2$ and $p < 0.05$ ; see SC.3 for more details.", + "bbox": [ + 169, + 354, + 826, + 398 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "ASI Features Improved Efficiency Across all websites, ASI-produced trajectories have 6.6-14.6 and $4.0 - 8.4\\%$ fewer steps, compared to vanilla and AWM baselines, respectively. As the task horizon continues to grow when involving more intermediate checkpoints, this margin between ASI and baselines will predictably be more prominent.", + "bbox": [ + 169, + 411, + 825, + 469 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Subsequent Benefits in Success Rate ASI also achieves higher success rates with more efficient trajectories, outperforming vanilla and AWM baselines by $38.9\\%$ and $20.7\\%$ on average. From manual analysis, we find this improvement comes from easier, better agent planning when using higher-level skills, without the need to tackle more complex procedures if only low-level primitive actions are available, as with vanilla and AWM agents.", + "bbox": [ + 169, + 474, + 823, + 546 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Case Study: Changing Multiple Addresses We present a representative case on the shopping website: changing billing and shipping addresses after moving. As depicted in the top row in Figure 3, the vanilla agent without adaptive skills often roams into some irrelevant exploration steps, instead of sticking to the optimal route to solve the required task. 
It runs for minutes and exhausts the maximum steps (i.e., 50) before finishing the task.", + "bbox": [ + 169, + 551, + 826, + 623 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/810aadc8162de8b670c598520349aa2a623d11538656c116f1a472d096254632.jpg", + "image_caption": [ + "Figure 3: Example scaled-up task of updating multiple addresses on shopping website." + ], + "image_footnote": [], + "bbox": [ + 173, + 628, + 826, + 901 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "With AWM, adding textual skills in memory provides soft guidelines for agents to follow, the agent thus better sticks to the goal and finishes each part of the task (i.e., navigate to the address page, update billing/shipping address) step by step. Although successful, the trajectory is long, i.e., 27 steps, and still takes a few minutes to finish.", + "bbox": [ + 169, + 102, + 826, + 161 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In comparison, ASI (in Figure 3 bottom row) showcases its efficiency by using learned skills to navigate_to_address_settings and update_address_details can solve each part in one step (vs. the 3-6 steps used by AWM for these parts). Overall, ASI correctly finishes all required actions in only 4 steps, shortening the horizon by $85.2\\%$ compared to AWM.", + "bbox": [ + 169, + 166, + 823, + 224 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Adapting Across Websites", + "text_level": 1, + "bbox": [ + 171, + 246, + 441, + 266 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To examine whether agents can generalize with learned skills, we test agents on real-world website counterparts for some of the domains in WebArena as listed in Table 5. 
This experiment setup can reflect on (1) transfer across different websites of the same domain,", + "bbox": [ + 169, + 280, + 421, + 391 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "and (2) transfer from simulated, sandboxed to real-world websites.", + "bbox": [ + 171, + 391, + 656, + 405 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/b55e238f84562ee1941db044ca264e0042cc5add90554d1fc6dd8b5509f25b8a.jpg", + "table_caption": [], + "table_footnote": [ + "Table 5: Real-world in-domain website counterparts to each WebArena sandboxed website." + ], + "table_body": "
DomainWebArena SandboxedReal-World
shoppingOneStopMarketTarget
online forumPostMillReddit
travelOpenStreetMapGoogle Maps
", + "bbox": [ + 434, + 282, + 816, + 349 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For each sandbox-real website pair, we take ten information-seeking style queries (He et al., 2024) in WebArena that do not involve potential privacy leakage or unrecoverable risky actions, such as making a purchase or changing user password. We provide the task details in §C.2. We compare ASI and AWM with their programmatic and textual skills as learned in §3, as well as comparing to the vanilla static agent baseline.", + "bbox": [ + 169, + 411, + 826, + 484 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Transferring Common Skills In Figure 4, we can see how ASI can effectively reuse common skills such as search_product in the first step on the Target website.", + "bbox": [ + 171, + 489, + 387, + 574 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Incompatible Skills One challenge faced by ASI is that some prior skills become incompatible on the new website. For example, the sort_by Listings() induced on OneStopMarket selects options from a dropdown menu, yet sorting on", + "bbox": [ + 169, + 579, + 390, + 705 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "the Target website opens a sidebar; despite their semantic similarity, the concrete actions in skill programs are no longer applicable. Still, we find that agents can often spot this incompatibility and rarely attempt to use these deprecated skills.", + "bbox": [ + 169, + 705, + 823, + 748 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg", + "image_caption": [ + "Figure 4: ASI can generalize the search_product skill but face incompatibility when sorting items." 
+ ], + "image_footnote": [], + "bbox": [ + 398, + 494, + 816, + 660 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Adapting Skills to New Environment Although some skills induced on previous websites", + "bbox": [ + 169, + 753, + 823, + 768 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "cannot be directly used on arbitrary new websites, we hypothesize that these skills can still serve as informative references on solving procedurally similar tasks or composing new skills targeted for the new website design.", + "bbox": [ + 169, + 768, + 369, + 893 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/32d2941bc49fc2c302ec4d75a44393a68d9505c660e98d27b329443e04fd912a.jpg", + "table_caption": [], + "table_footnote": [ + "Table 6: Cross-website results. ASI significantly surpasses baselines in sr and # steps (with $|t| > 2$ and $p < 0.05$ ) from our analysis in §C.3." + ], + "table_body": "
MethodShoppingRedditMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
Vanilla80.05.440.04.863.37.4
AWM80.05.056.74.8100.06.2
ASI90.03.476.74.493.34.4
AWM + update80.05.463.35.8100.07.2
ASI + update90.03.276.74.093.34.2
", + "bbox": [ + 383, + 772, + 816, + 869 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_footnote", + "text": "2We did not test on administrative and software websites given their more severe safety concerns.", + "bbox": [ + 189, + 909, + 823, + 924 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We thus allow agents to induce new skills or update previously acquired skills from experiences on the new website, denoted as +update entries in Table 6. We find that enabling skill update in both textual and program formats helps agent performance on new websites. Within the short online learning process (tens of examples), AWM adapts faster to the new websites, while ASI sees a more pronounced improvement in efficiency.", + "bbox": [ + 169, + 103, + 828, + 176 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Related Work", + "text_level": 1, + "bbox": [ + 171, + 196, + 328, + 212 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Adaptive Digital Agents An important thread of agent-improving methods is to build adaptive agents that can autonomously self-improve from experiences. Most works focus on integrating past experiences into agent memory by collecting human annotation (Deng et al., 2024) or LM-based synthesis (Ou et al., 2024; Xu et al., 2025), especially via agent-driven exploration with instruction- (Murty et al., 2024b) or trajectory-driven (Murty et al., 2024a) approaches, offering warm starts on the websites of interest. 
Other works gather experiences (Wang et al., 2024b) or feedback (Qu et al., 2024) during test time, and augment them into memory through parametric channels such as supervised fine-tuning (Murty et al., 2024a), contrastive learning (Song et al., 2024), or reinforcement learning (Zhou et al., 2024b). Meanwhile, non-parametric approaches can directly augment reflections (Shinn et al., 2023), raw past experiences (Wang et al., 2023; Zheng et al., 2023), or further induced reusable workflows (Wang et al., 2024b). While these adaptive agents learn textual skills stored in memory, our ASI stores skills as verifiable and composable programs in the agent action space (i.e., skill library), thus enabling better quality and efficiency.", + "bbox": [ + 169, + 229, + 826, + 426 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Skill Discovery and Learning Learning specialized skills for tasks in programmatic (Shin et al., 2019; Ellis et al., 2023; Cai et al., 2024; Wang et al., 2024a; Grand et al., 2024), embodied (Sharma et al., 2022; Wang et al., 2023; Liang et al., 2023; Sarch et al., 2024; Wong et al., 2024), and physical (Yu et al., 2023) environments has shown to success in agent performance. Particularly for digital agents built for web navigation tasks, most works focus on exploring skills offline with RL roll-outs (Gur et al., 2018; Liu et al., 2018; Putta et al., 2024; Qi et al., 2024) or LM-based prompting (Zhou et al., 2024b; Murty et al., 2024a; Patel et al., 2024). While this exploration stage could offer some supervised data to update the agent policy either parametric (Murty et al., 2024a; Patel et al., 2024) or non-parametrically (Zheng et al., 2023; Murty et al., 2024b), it often costs enormous extra computation and may suffer from the lack or mismatch in distribution with the downstream tasks at hand (Wang et al., 2024b). 
In contrast, our ASI does not rely on supervised data and can directly learn skills online without prior exploration.", + "bbox": [ + 169, + 430, + 828, + 612 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Web Navigation Benchmarks Digital agents have been explored across a wide range of tasks (Yao et al., 2024; Kapoor et al., 2025; Xie et al., 2024b), among which one of the most popular application being browsing and navigating through versatile websites such as shopping (Yao et al., 2022), social media communication (Zhou et al., 2024a; Koh et al., 2024), knowledge work tasks (Drouin et al., 2024), and more (Deng et al., 2024). Our work focuses on general web navigation tasks using the WebArena (Zhou et al., 2024a) benchmark, meanwhile exploring other challenging scenarios such as scaled-up activities (Yoran et al., 2024) and cross-domain generalization (Deng et al., 2024).", + "bbox": [ + 169, + 618, + 828, + 729 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "7 Conclusion and Future Discussions", + "text_level": 1, + "bbox": [ + 171, + 752, + 524, + 768 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we present ASI to support web navigation agents to autonomously induce, verify, learn, and apply programmatic skills during online inference. Beyond achieving $23.5\\%$ success rate and $15.3\\%$ efficiency increases in general web tasks, we also showcase ASI's strengths for scaled-up web activities, thanks to the high-level action interface offered by the programmatic abstraction. Moreover, we examine skill generalizability to new, real-world websites, and find ASI still offers great efficiency while flexibly updating skills to new environments. 
While our work aims to offer insights on the optimal representation in agent skill acquisition, we still find multiple pieces in ASI worthy of further investigation, such as the conceptually or empirically suitable granularity of skills, the stability of the online evolving process, and the skill quality in comparison to human expert desiderata.", + "bbox": [ + 169, + 784, + 828, + 926 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 171, + 102, + 346, + 119 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We would like to thank Jiayuan Mao, Yueqi Song, Boyuan Zheng, and Yu Su for the insightful discussions. We thank Yiqing Xie, Xinran Zhao, and Mingqian Zheng for their helpful comments on the paper draft. Zora is supported by the CMU Presidential Fellowship and Fujitsu Research. Apurva is supported by Amazon.", + "bbox": [ + 169, + 133, + 826, + 191 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 210, + 274, + 227 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. 
Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bfcb4967418bfb8ac142f64a-Paper.pdf.",
LILO: Learning interpretable libraries by compressing and documenting code. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=TqYbAWKMJe.", + "Izzeddin Gur, Ulrich Rückert, Aleksandra Faust, and Dilek Hakkani-Tür. Learning to navigate the web. CoRR, abs/1812.09195, 2018. URL http://arxiv.org/abs/1812.09195.", + "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, 2024. URL https://aclanthology.org/2024.acl-long.371/." + ], + "bbox": [ + 171, + 236, + 828, + 925 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem AlShikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In European Conference on Computer Vision, pp. 161-178. Springer, 2025.", + "Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. Visualwebarena: Evaluating multimodal agents on realistic visual web tasks. arXiv preprint arXiv:2401.13649, 2024.", + "Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. 
arXiv preprint arXiv:2005.01643, 2020.", + "Jacky Liang, Wenlong Huang, Fei Xia, Peng Xu, Karol Hausman, Brian Ichter, Pete Florence, and Andy Zeng. Code as policies: Language model programs for embodied control. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 9493-9500. IEEE, 2023.", + "Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, and Percy Liang. Reinforcement learning on web interfaces using workflow-guided exploration. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=ryTp3f-0-.", + "Bodhisattwa Prasad Majumder, Bhavana Dalvi, Peter Jansen, Oyvind Tafjord, Niket Tandon, Li Zhang, Chris Callison-Burch, and Peter Clark. CLIN: A continually learning language agent for rapid task adaptation and generalization, 2024. URL https://openreview.net/forum?id=d5DGVHMsC.", + "Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator, 2024a. URL https://arxiv.org/abs/2410.02907.", + "Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. Bagel: Bootstrapping agents by guiding exploration with language, 2024b. URL https://arxiv.org/abs/2403.08140.", + "Tianyue Ou, Frank F. Xu, Aman Madaan, Jiarui Liu, Robert Lo, Abishek Sridhar, Sudipta Sengupta, Dan Roth, Graham Neubig, and Shuyan Zhou. Synatra: Turning indirect knowledge into direct demonstrations for digital agents at scale. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=KjNEzWRIqn.", + "Yichen Pan, Dehan Kong, Sida Zhou, Cheng Cui, Yifei Leng, Bing Jiang, Hangyu Liu, Yanyi Shang, Shuyan Zhou, Tongshuang Wu, and Zhengyang Wu. Webcanvas: Benchmarking web agents in online environments. In Agentic Markets Workshop at ICML 2024, 2024. 
URL https://openreview.net/forum?id=O1FaGasJob.", + "Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024.", + "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024.", + "Zehan Qi, Xiao Liu, Iat Long Iong, Hanyu Lai, Xueqiao Sun, Wenyi Zhao, Yu Yang, Xinyue Yang, Jiadai Sun, Shuntian Yao, et al. Webrl: Training llm web agents via self-evolving online curriculum reinforcement learning. arXiv preprint arXiv:2411.02337, 2024.", + "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve, 2024. URL https://arxiv.org/abs/2407.18219." + ], + "bbox": [ + 171, + 102, + 826, + 922 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Gabriel Sarch, Lawrence Jang, Michael Tarr, William W Cohen, Kenneth Marino, and Katerina Fragkiadaki. Vlm agents generate their own memories: Distilling experience into embodied programs of thought. Advances in Neural Information Processing Systems, 37: 75942-75985, 2024.", + "Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or rl is suboptimal. arXiv preprint arXiv:2502.12118, 2025.", + "Pratyusha Sharma, Antonio Torralba, and Jacob Andreas. Skill induction and planning with latent language. 
In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, May 2022. URL https://aclanthology.org/2022.acl-long.120/.", + "Eui Chul Shin, Miltiadis Allamanis, Marc Brockschmidt, and Alex Polozov. Program synthesis and semantic parsing with learned code idioms. Advances in Neural Information Processing Systems, 32, 2019.", + "Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning, 2023. URL https://arxiv.org/abs/2303.11366.", + "Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for llm agents, 2024. URL https://arxiv.org/abs/2403.02502.", + "Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. Voyager: An open-ended embodied agent with large language models, 2023. URL https://arxiv.org/abs/2305.16291.", + "Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=DCNCwamJjI.", + "Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024b.", + "Lionel Wong, Jiayuan Mao, Pratyusha Sharma, Zachary S Siegel, Jiahai Feng, Noa Korneev, Joshua B. Tenenbaum, and Jacob Andreas. Learning grounded action abstractions from language. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qJ0Cfj4Ex9.", + "Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. Travelplanner: A benchmark for real-world planning with language agents. In *Forty-first International Conference on Machine Learning*, 2024a. 
URL https://openreview.net/forum?id=15XQzNkAOe.", + "Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, et al. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. arXiv preprint arXiv:2404.07972, 2024b.", + "Frank F Xu, Yufan Song, Boxuan Li, Yuxuan Tang, Kritanjali Jain, Mengxue Bao, Zora Z Wang, Xuhui Zhou, Zhitong Guo, Murong Cao, et al. Theagentcompany: benchmarking llm agents on consequential real world tasks. arXiv preprint arXiv:2412.14161, 2024.", + "Yiheng Xu, Dunjie Lu, Zhennan Shen, Junli Wang, Zekun Wang, Yuchen Mao, Caiming Xiong, and Tao Yu. Agenttrek: Agent trajectory synthesis via guiding replay with web tutorials. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=EEgYUccwsV.", + "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35:20744-20757, 2022." + ], + "bbox": [ + 171, + 102, + 826, + 924 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shunyu Yao, Noah Shinn, Pedram Razavi, and Karthik Narasimhan. $\\tau$ -bench: A benchmark for tool-agent-user interaction in real-world domains. arXiv preprint arXiv:2406.12045, 2024.", + "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? 
arXiv preprint arXiv:2407.15711, 2024.", + "Wenhao Yu, Nimrod Gileadi, Chuyuan Fu, Sean Kirmani, Kuang-Huei Lee, Montserrat Gonzalez Arenas, Hao-Tien Lewis Chiang, Tom Erez, Leonard Hasenclever, Jan Humplik, brian richter, Ted Xiao, Peng Xu, Andy Zeng, Tingnan Zhang, Nicolas Heess, Dorsa Sadigh, Jie Tan, Yuval Tassa, and Fei Xia. Language to rewards for robotic skill synthesis. In 7th Annual Conference on Robot Learning, 2023. URL https://openreview.net/forum?id= SgTPdyehXMA.", + "Longtao Zheng, Rundong Wang, Xinrun Wang, and Bo An. Synapse: Trajectory-as-exemplar prompting with memory for computer control. In The Twelfth International Conference on Learning Representations, 2023.", + "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx.", + "Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Erran Li. Proposer-agent-evaluator (pae): Autonomous skill discovery for foundation model internet agents. arXiv preprint arXiv:2412.13194, 2024b." 
+ ], + "bbox": [ + 171, + 102, + 828, + 474 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Experiment Details", + "text_level": 1, + "bbox": [ + 171, + 101, + 385, + 119 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 Agent Action Space", + "text_level": 1, + "bbox": [ + 171, + 136, + 366, + 152 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/1aaa066e75e3a385f9f9d19b3acdea81acd51a20e825389f3c2d58675f3d0989.jpg", + "table_caption": [ + "Table 7 shows the default action space the web navigation agents we employed in all the experiments. This action space remains the same for both (i) static, vanilla agent, as well as the (ii) adaptive agent that learn textual skills in memory, i.e., AWM." + ], + "table_footnote": [], + "table_body": "
Action TypeDescription
noopwait_ms)Do nothing for specified time.
click(elem)Click at an element.
hover(elem)Hover on an element.
fill(elem, value)Type into an element.
keyboard_press(key_comb)Press a key combination.
scroll(x, y)Scroll horizontally or vertically.
select_option(elem, options)Select one or multiple options.
goto(url)Navigate to a url.
go_back()Navigate to the previous page.
go_forward()Navigate to the next page.
new_tab()Open a new tab.
tab_close()Close the current tab.
tabfocus(index)Bring tab to front.
send msg_to_user(text)Send a message to the user.
report_infeasible Reason)Notify user that instructions are infeasible.
", + "bbox": [ + 238, + 217, + 754, + 450 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table 7: Initial action space for agents throughout our experiments.", + "bbox": [ + 253, + 454, + 743, + 470 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 Prompts for LLM-Based Components", + "text_level": 1, + "bbox": [ + 171, + 502, + 496, + 518 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Prompt for Episode Evaluation", + "text_level": 1, + "bbox": [ + 197, + 535, + 431, + 551 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "You are an expert in evaluating the performance of a web navigation agent. The agent is designed to help a human user navigate a website to complete a task. Given the user's intent, the agent's action history, the final state of the webpage, and the agent's response to the user, your goal is to decide whether the agent's execution is successful or not. Please be careful of each detail and strict about the evaluation process.", + "bbox": [ + 194, + 564, + 802, + 648 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "There are three types of tasks: 1. Information seeking: The user wants to obtain certain information from the webpage, such as the information of a product, reviews, map info, comparison of map routes, etc. The bot's response must contain the information the user wants, or explicitly state that the information is not available. Otherwise, e.g. the bot encounters an exception and respond with the error content, the task is considered a failure. Besides, be careful about the sufficiency of the agent's actions. For example, when asked to list the top-searched items in a shop, the agent should order the items by the number of searches, and then return the top items. If the ordering action is missing, the task is likely to fail. 2. Site navigation: The user wants to navigate to a specific page. 
Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response. 3. Content modification: The user wants to modify the content of a webpage or configuration. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response.", + "bbox": [ + 194, + 647, + 803, + 856 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "*IMPORTANT* Please be strict about the evaluation process. Format your response into two lines as shown below:", + "bbox": [ + 196, + 854, + 800, + 883 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Thoughts: ", + "bbox": [ + 196, + 883, + 571, + 898 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Status: \"success\" or \"failure\"", + "bbox": [ + 196, + 897, + 411, + 910 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Prompt for Episode Cleaning", + "text_level": 1, + "bbox": [ + 197, + 104, + 419, + 121 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "You are a helpful assistant in summarizing web browsing actions. Your task is to summarize the main action taken and the corresponding state change, from the input thought process and environmental description.", + "bbox": [ + 196, + 133, + 799, + 176 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Example input: \"Let me think about this step by step: 1) I've clicked the Columns button and based on the accessibility tree, I can now see a list of available columns that can be shown/hidden. 
2) Looking through the available columns in the accessibility tree, I don't see any direct column for \"Number of Orders\" or similar metric. 3) Since I don't see the information I need in the available columns, this suggests that: - Either this information is available through a different path - Or we need to access the Sales/Orders section to correlate this information 4) The most logical path would be to: - Go to the Sales section where we can see all orders - And then analyze which customers have the most orders 5) From the accessibility tree, I can see the \"SALES\" link with bid \"183\" in the main navigation.", + "bbox": [ + 196, + 176, + 800, + 314 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Let me click on the SALES link: This should open the Sales menu where we can access the Orders section to analyze customer order histories.\"", + "bbox": [ + 196, + 314, + 797, + 340 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Example output: \"Clicked the SALES link to access the Orders section for customer order histories.\"", + "bbox": [ + 196, + 340, + 799, + 369 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Prompt for Skill Induction", + "text_level": 1, + "bbox": [ + 197, + 391, + 401, + 405 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "You are a proficient software engineer. Your task is to (1) summarize reusable functions as APIs from the provided action trajectories, and (2) rewrite the trajectories using the reusable functions you generated in (1).", + "bbox": [ + 196, + 419, + 799, + 462 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "For (1), from the provided examples about the same task, you job is to generate Python functions that can be reused to solve (part of) these tasks. 
The functions should have mediocre complexity: (i) containing at least three actions and not too simple (e.g., a single line of code), (ii) not too complex (e.g., more than 10 lines of code), and should be general enough to be applied to other similar tasks. The arguments to these functions should be common variables (such as strings and lists), avoid using complex inputs such as another function.", + "bbox": [ + 196, + 462, + 800, + 558 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The actions you can use are: 'click', 'fill', 'hover', 'keyboard_press', 'scroll'; 'tabfocus', 'new_tab', 'tab_close', 'go_back', 'go_forward'; 'send msg to user', 'report_infeasible', 'select_option'. Do not use other undefined actions. Do not include any try-except blocks in the functions.", + "bbox": [ + 196, + 558, + 800, + 612 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Please include 'Args', 'Returns', and 'Examples' in the function documentation. For (2), write the instruction and rewritten code of each example. Do not include the answer response or example-specific information in the rewritten code. Pay attention to whether all link IDs are available before specifying them in the generated functions. If you use 'send msg_to_user', make sure the message is decided within the function, instead of provided as an argument.", + "bbox": [ + 196, + 613, + 800, + 696 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Make sure each function contains no less than 2 steps, and no more than 5 steps; to keep the functions simple and task-oriented. 
You can generate zero, one, or multiple functions depending on the provided examples.", + "bbox": [ + 196, + 696, + 800, + 739 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B Skill Induction: Analysis", + "text_level": 1, + "bbox": [ + 171, + 773, + 434, + 791 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "We provide more details about the skill induction process, in skill curation and reuse frequency (§B.1) and representative skill case studies (§B.2)", + "bbox": [ + 169, + 806, + 823, + 835 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "B.1 Skill Induction and Reusability", + "text_level": 1, + "bbox": [ + 171, + 854, + 452, + 871 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To provide more insights on how agents curate and reuse programmatic skills, for the main experiments on WebArena, we calculate the number of examples that (i) attempt to induce a new skill, (ii) successfully induce a new skill, and (iii) reuse a previously induced skill.", + "bbox": [ + 169, + 881, + 823, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "As shown in Table 8, the agent reuses at least 1 skill for $42.5\\%$ of the cases, which is pretty frequent across websites. Moreover, these skills are created using $7.6\\%$ of the tasks, demonstrating the high scalability of our skill learning approach.", + "bbox": [ + 169, + 103, + 826, + 148 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/a5be42cba2d45ce05778e8e19e0b347f9774925e80aff099ef819a4fa5a5e123.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
DomainAttempted InductionNumber of Examples
Successful InductionReuse ExamplesTotal
shopping21896180
admin3815108156
reddit241114105
map131080109
gitlab251110175
", + "bbox": [ + 205, + 152, + 787, + 256 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 8: Analyzing the number of examples that attempt, induce, and reuse skills.", + "bbox": [ + 200, + 258, + 792, + 275 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.2 Representative Skills", + "text_level": 1, + "bbox": [ + 171, + 299, + 375, + 315 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We demonstrate two representative types of skills, which (i) chain frequently-used-together actions, and (ii) involve advanced programming primitives.", + "bbox": [ + 169, + 325, + 823, + 356 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Chaining Primitive Actions The most common type of skills chains multiple frequently-used-together primitive actions (e.g., click, fill) in a certain order, to reflect a particular common usage, such as the search_product tool illustrated below. This type of skill provides an easy portal for frequent workflows.", + "bbox": [ + 169, + 369, + 826, + 426 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def search_product(name: str):\n click(\"Search textbox...\")\n if has_pop_window():\n click(\"Close\")\n fill(\"Search textbox\", name)\n click(\"Search button\")", + "guess_lang": "python", + "bbox": [ + 163, + 431, + 457, + 512 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Advanced Programming Primitives Besides a sequential chaining of actions, some skills contain more advanced logics, such as if-else or for/while loops, as the two examples below.", + "bbox": [ + 169, + 537, + 826, + 568 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def navigate_and_sort_category(category_id: str, subcategory_id: str | None = None):\n '''\"Navigate to a product category and sort items by price.\nArgs:\n category_id: ID of the main category link\n subcategory_id: Optional ID of 
the subcategory link, if needed\nReturns:\n None\nExamples:\n navigate_and_sort_category('1241', '1873') # PS4 under Video Games\n navigate_and_sort_category('1245') # For main category only\n '''' click(category_id) # Click main category\nif subcategory_id:\n click(subcategory_id) # Click subcategory if provided\nselect_option(\"1553\", \"Price\") # Sort by price ascending", + "guess_lang": "python", + "bbox": [ + 155, + 571, + 792, + 829 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "def browse_category_byjahicationmenu_id_sequence: list):\n \"\"Browse products by navigating through a sequence of menu IDs.\n This function allows navigation through a series of menu interactions.\n Args:\n menu_id_sequence(list): A list of menu IDs to interact\n sequentially, using hover actions followed by a click.", + "guess_lang": "python", + "bbox": [ + 155, + 845, + 818, + 925 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 516, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Example usage: browse_category_byjahgnation['735','786','797']#Navigates Home&Kitchen->Storage&Organization->Racks,Shelves& Drawers \n\"''\" foridx,menu_idin enumerate(menu_id_sequence[-1]): hover/menu_id) click菜单_id_sequence[-1])#Click the final id to land in the predefined category", + "guess_lang": "txt", + "bbox": [ + 155, + 104, + 830, + 250 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C Scaled-Up and Cross-Website Tests", + "text_level": 1, + "bbox": [ + 169, + 279, + 524, + 297 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We provide the full list of tasks used in scaled-up (S4) and cross-website (S5) analyses in S.C.1 and S.C.2, respectively. 
In S.C.3, we further perform significance testing to validate the findings from Table 4 and Table 6.", + "bbox": [ + 169, + 311, + 826, + 356 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.1 Scaled-Up Tasks", + "text_level": 1, + "bbox": [ + 171, + 373, + 341, + 390 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/744d7a242930e30176d7727b1c46da76ad345db665e2c589a8713ff65ca7e1c6.jpg", + "table_caption": [ + "Table 9, Table 10, Table 11, Table 12 and Table 13 shows example scaled-up tasks studied on the shopping, admin, social forum, software development, and map websites." + ], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
Add a wireless headphone, a water bottle, a notebook, a ground coffee, and a mug to my shopping cart.Add a wireless headphone to cart; Add a water bottle to cart; Add a notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Add the most expensive item from the video games category, the cheapest item from the Office Products category, and the most relevant coffee mug to my shopping cart.Add the most expensive item from the video games category to cart; Add the cheapest item from the Office Products category to cart; the most relevant coffee mug to my shopping cart.3
Add the cheapest wireless headphone, a water bottle, the most expensive notebook, a ground coffee, and a mug to my shopping cart.Add the cheapest wireless headphone to cart; Add a water bottle to cart; Add the most expensive notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Show me the ordered items for each cancelled order from Feb to May in 2023.Show me the 5/17/23 order; Show me the 2/24/23 order; Show me the 2/11/23 order.3
Iterative update my billing address to 231 Willow Way, Suite 100, Chicago, IL, 60601. Then, update my shipping address to 987 Sycamore Circle, Philadelphia, PA, 19102.Successfully update my billing address; Successfully update my shipping address.2
", + "bbox": [ + 173, + 444, + 823, + 760 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 9: Exemplar scaled-up browsing tasks on the shopping website.", + "bbox": [ + 243, + 768, + 751, + 785 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2 Cross-Website Tasks", + "text_level": 1, + "bbox": [ + 171, + 811, + 370, + 825 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 14, Table 15, and Table 16 lists example tasks to test agent generalization abilities on shopping (OneStopMarket to Target), social forum (Postmill to Reddit), and software development (GitLab to GitHub) domains.", + "bbox": [ + 169, + 838, + 826, + 883 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.3 Significance Testing", + "text_level": 1, + "bbox": [ + 171, + 900, + 367, + 916 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/802ab4997fb966f03cc6360d294c6427efd24e884407d8c1be5c83e3522e3a9a.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
Tell me the number of reviews that our store received by far that mention terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.Return the correct number for terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.5
I need to contact a list of customers. Find the customer name and email with phone number 2058812302, 2137418080, 2065555555, 8015551212, and 555-229-3326.Return the correct name and email information for customers with each of the five phone numbers.5
I will need to update our webpage to create a more energetic vibe. Change the page title of '404 Not Found' to 'Bruh bro you clicked the wrong page', the page title of 'Enable Cookies' to 'Cookie monster coming to your place', the page title of 'Home Page' page to 'This is the home page!!', the page with title 'Privacy Policy' to 'No privacy policy is needed is this dystopian world', and lastly, change the page 'About Us' to 'Secret'.Change the page title correctly for each of the five pages.5
I need to generate a bunch of report to show to the store manager in an hour. Could you help me generate a sales order report for the last month, over the last 45 days, and for Q1? I'll also need a refund report for last year, and a tax report for this year. Today is 3/15/2023.Generate a sales report for 2/1/2023-2/29/2023; generate a sales report for 1/29/2023-3/15/2023; generate a sales report for 1/1/2023-3/15/2023; Generate a refund report for 1/1/2022-12/31/2022; Generate a tax report for 1/1/2023-3/15/2023.5
Tell me the SKU of products that have 10 units, 3 units, and 0 units left. Also, give me the product names that have 2-3 units left.Return the correct SKU for the first three questions; return the correct product names for the last question.4
", + "bbox": [ + 173, + 99, + 823, + 542 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Scaled-Up Tasks We conduct t-tests between (i) ASI and AWM, (ii) ASI and VANILLA agent. From the results in Table 17, we find the advantage of ASI in success rate and efficiency improvements, when comparing to both AWM and VANILLA agents, are statistically significant, as indicated by all", + "bbox": [ + 169, + 592, + 421, + 717 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "t-statistics with absolute values over 2 and p-value below 0.05.", + "bbox": [ + 169, + 717, + 624, + 732 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/a48f320b1a2f55cba3570d504b013d6a4c5591b774509c0aaa4f65a44c16948a.jpg", + "table_caption": [ + "Table 10: Exemplar scaled-up browsing tasks on the shopping admin website." + ], + "table_footnote": [], + "table_body": "
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-2.36010.02262.76640.0068
ASI vs. VANILLA-4.09220.00022.19830.0296
", + "bbox": [ + 433, + 593, + 820, + 657 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Cross-Web Tasks We conduct similar significance testing on cross-web tasks and report the results in Table 18. While ASI does not significantly outperform AWM in success rate, given the presumably greater flexibility of textual workflows, ASI still exhibits significant advantages on", + "bbox": [ + 169, + 746, + 421, + 872 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "the efficiency side. Furthermore, comparing ASI to static VANILLA agents, ASI achieves significant improvements in both success rates and efficiency (i.e., reduced number of steps), suggested by $|t| > 2$ and $p < 0.05$ .", + "bbox": [ + 169, + 872, + 826, + 915 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/8109e663c1fd04718516750d3b79702b899db5b9302f97f72deea8dfb48e50d0.jpg", + "table_caption": [ + "Table 17: Results of significance testing on ASI's advantages for scaled-up web tasks." + ], + "table_footnote": [], + "table_body": "
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-1.39800.16732.12380.0378
ASI vs. VANILLA-3.59840.00072.57920.0125
", + "bbox": [ + 433, + 756, + 820, + 821 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 18: Results of significance testing on ASI's advantages for cross-web tasks.", + "bbox": [ + 429, + 828, + 826, + 858 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/f55cd46683e3679b683a331e175b0d050927180d5bf9ae39211680600432ba2f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
I'm planning to organize multiple meetings in the next few months. Help me post notices on virtual meetings for the little women on Apr 10th, for Harry Potter in May 15th, and for Jane Eyre in Jan 30th, in the most suitable forums in PostMill.Post Apr 10th meetup; Post about May 15th meetup; Post Jan 30th meetup. All in book-related forums.3
Could you tell me all forums with names related to computer science?must include: deeplearning (1 pt), Machine-Learning (1 pt); optionally (get 1 score if include any): science, askscience, technology.3
Find the most relevant posts about jerseycity, newjersey, and nyc; and tell me how different they are.Correctly find post about jerseycity; Correctly find post about newjersey; Correctly find post about nyc; Answer how different they are.4
Thumbs down the top-2 posts in jerseycity, newjersey, and nyc forums, I don't like them.Thumbs down the top-2 posts in the jerseycity forum; Thumbs down the top-2 posts in the newjersey forum; Thumbs down the top-2 posts in the nyc forum.3
Reply "Thank you! This is super helpful!" to three posts about long-distance relationship advice.Reply to three posts with the correct message. Need to be relevant to long-distance relationship advice.3
", + "bbox": [ + 173, + 119, + 823, + 462 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/32cefbcbef9f4c017763f3d35ba213c0ab16333e67dde8da90f90a5625d1eeb9.jpg", + "table_caption": [ + "Table 11: Exemplar scaled-up tasks on the Postmill website." + ], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
Display the list of issues in the a11yproject/a11yproject.com repository that have labels related to 'help needed', and assign the most recent one to the top contributor of this repository.Display the help-wanted issues; find the top contributor; assign him to the most recent help-needed issue.3
Set up a new, empty repository with the name agent_skill_induction, and create a MIT license file. Then, invite Abishek and Vinta as collaborators.Create a new repository with given name; Create a MIT license inside; Invite both collaborators.3
Start a private project web_agent.android xl with Android template and add primer, convexegg, abishek as members.Create the repository private and with Android template; Invite all three people as members.2
Add the following users to repo a11y-webring.club as developer: [abisubramanya27, lahwaacz], and [yjlou, a11yproject] as maintainer.Add abisubramanya27 and lahwaacz as developers; Add yjlou and a11yproject as maintainers.2
Add the following users [abisubramanya27, lahwaacz, yjlou, a11yproject] to repo a11y-webring.club, make sure to assign them different roles.Add abisubramanya27 with role 1; Add lahwaacz with role 2; Add yjlou with role 3; Add a11yproject as role 4. Role 1-4 need to be all different.4
", + "bbox": [ + 173, + 534, + 823, + 876 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Table 12: Exemplar scaled-up tasks on the GitLab website.", + "bbox": [ + 285, + 883, + 710, + 901 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "table", + "img_path": "images/1ff0af4eb9d2ef32997f69eebd000de698114a0aa8fdfd862945d1007cd5f3ec.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
Search for the closest restaurants, cafes, parking, and banks to Carnegie Mellon University on the map.Return the closest restaurants; Return the closest cafes; Return the closest parking; Return the closest banks.4
I will need to go to multiple places from Carnegie Mellon University today, including the Univ of Pittsburgh, UPMC shady-side, the Schenley park, and Squirrel Hill. Could you should me the driving route to all those places?Show me driving route from CMU to UPitt; Show me driving route from CMU to UPMC; Show me driving route from CMU to Schenley Park; Show me driving route from CMU to Squirrel Hill.4
Show me the route of driving from CMU to University of Pittsburgh, then walking to the Schenley Park; next, bike to UPMC shadyside, and walk to Squirrel Hill after that.Show me CMU → Upitt route by car; Show me Upitt → Schenley Park route by foot; Show me Schenley Park → UPMC route by bike; Show me UPMC → Squirrel Hill route by foot.4
Check if the Univ of Pittsburgh, UPMC shadyside, schenley park, and squirrel hill can be reached within one hour by walking, if departing from Carnegie Mellon University.Return yes to route 1, route 2, route 3, and route 4.4
Tell me the coordinates of Univ of Pittsburgh, UPMC shadyside, schenley park, squirrel hill, and CMU in DD format.Return the coordinates of each of the four places.4
", + "bbox": [ + 173, + 162, + 823, + 515 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/52294272a8d6f283c35d590bbc871337b4ac1a2662ec382ff8567094c2e77506.jpg", + "table_caption": [ + "Table 13: Exemplar scaled-up tasks on the Map website." + ], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
Show me the options for Canon photo printer?Return the correct search result.1
I have a lot of Nintendo Switch game cards now, help me find the best storage option to fit all 11 cards.Return one valid product.1
What is the price range for beauty products?Return the correct price range.1
Show me products under $25 for woman shoesDisplay correct products.1
Show the least expensive shoe storage with a minimum storage capacity of 12 pairs.Display correct products.1
", + "bbox": [ + 173, + 671, + 823, + 835 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Table 14: Exemplar shopping tasks on the target website.", + "bbox": [ + 290, + 844, + 702, + 859 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "table", + "img_path": "images/d71556dcaf97c5216a9666f1fe442438dcc1304ce600f3c7a086e9010778a15b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
Tell me the names of books recommended in the latest five posts in the books forumFind the r/books forum; Find the most recent 5 posts; Give the correct answer.3
Tell me the titles of the 5 most recent posts about little women in the books forumFind the r/books forum; Find little women related posts; Sort the posts by newest.3
What are the recommended products for noise-canceling headphones within a bud-get of $200 in r/headphonesFind the r/headphones forum; Correctly search with noise-canceling, under $200 re-requirements; Return a valid headphone rec-ommendation.3
Find 3 pieces of advices about deal with long-distance relationships in a subreddit for relations.Navigate to a forum about relations; find at least 3 pieces of advice from relevant posts.2
Find if there are any jeep wrangler meetups. If so, when and where?Search in jeep wrangler related forums; Re-turn a valid answer based on the search result.2
", + "bbox": [ + 173, + 157, + 823, + 412 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/f7a8b6011454d378ab9e8ea447740255b4e8ead0c6b591b4e2c4a6fda28334ed.jpg", + "table_caption": [ + "Table 15: Exemplar social forum tasks on the reddit website." + ], + "table_footnote": [], + "table_body": "
InstructionCheckpointsScore
Tell me the full address of all international airports that are within a driving distance of 30 miles to Carnegie Mellon UniversityReturn Pittsburgh International Airport.1
I will arrive Pittsburgh Airport soon. Provide the name of a Hilton hotel in the vicinity, if available. Then, tell me the the walking distance to the nearest supermarket own by a local company from the hotel.Show me the hotels; Find a nearby supermarket; Show me the walking route from the hotel to the supermarket.3
Show me the walking route from nearby hotels to CMU, Pittsburgh that take at most 5 minutes?Find a hotel that meets the walking time requirement; Show me the walking route.2
I am at CMU Pittsburgh, how long it takes to the nearest USPS postal office with different transportation methods?Return travel time by car, by foot, by bus, and by bike.4
Tell me the coordinates of Carnegie Mellon Cafe in DD format.Return the correct coordinates.1
", + "bbox": [ + 173, + 560, + 823, + 839 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 16: Exemplar social forum tasks on the Google Maps website.", + "bbox": [ + 253, + 847, + 743, + 863 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_model.json b/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_model.json new file mode 100644 index 0000000000000000000000000000000000000000..fefc4c1e81a365543f168edee97ac57008484827 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_model.json @@ -0,0 +1,2904 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.1, + 0.694, + 0.123 + ], + "angle": 0, + "content": "Inducing Programmatic Skills for Agentic Tasks" + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.147, + 0.717, + 0.163 + ], + "angle": 0, + "content": "Zora Zhiruo Wang Apurva Gandhi Graham Neubig Daniel Fried" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.164, + 0.388, + 0.177 + ], + "angle": 0, + "content": "Carnegie Mellon University" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.177, + 0.538, + 0.191 + ], + "angle": 0, + "content": "{zhiruow,apurvag,gneubig,dfried}@cs.cmu.edu" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.212, + 0.542, + 0.228 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.244, + 0.771, + 0.499 + ], + "angle": 0, + "content": "To succeed in common digital tasks such as web navigation, agents must carry out a 
variety of specialized tasks such as searching for products or planning a travel route. To tackle these tasks, agents can bootstrap themselves by learning task-specific skills online through interaction with the web environment. In this work, we demonstrate that programs are an effective representation for skills. We propose agent skill induction (ASI), which allows agents to adapt themselves by inducing, verifying, and utilizing program-based skills on the fly. We start with an evaluation on the WebArena agent benchmark and show that ASI outperforms the static baseline agent and its text-skill counterpart by \\(23.5\\%\\) and \\(11.3\\%\\) in success rate, mainly thanks to the programmatic verification guarantee during the induction phase. ASI also improves efficiency by reducing \\(10.7 - 15.3\\%\\) of the steps over baselines, by composing primitive actions (e.g., click) into higher-level skills (e.g., search_product). We then highlight the efficacy of ASI in remaining efficient and accurate under scaled-up web activities. Finally, we examine the generalizability of induced skills when transferring between websites, and find that ASI can effectively reuse common skills, while also updating incompatible skills to versatile website changes.\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.523, + 0.321, + 0.539 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.551, + 0.828, + 0.693 + ], + "angle": 0, + "content": "To achieve success in common digital tasks such as web navigation, it is essential for agents to be able to perform a variety of specialized tasks such as searching for products on a shopping website (Yao et al., 2022; Deng et al., 2024) or finding a driving route on the map (Zhou et al., 2024a; Xie et al., 2024a). 
While one source for agents to learn such tasks is demonstrations annotated by humans (Deng et al., 2024) or synthesized with large language models (LMs) on websites of interest (Murty et al., 2024b;a), this can be a challenging offline learning procedure given the broad range of website domains and functionalities, especially for the collected demonstrations to match or cover the distribution of tasks queried at inference time (Zhou et al., 2024b); not to mention the limitations in resources to collect abundant high-quality data at ease (Pan et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.697, + 0.829, + 0.838 + ], + "angle": 0, + "content": "Instead of learning from demonstrations offline, an alternative way is to learn these tasks directly online from test queries to prevent potential distribution mismatch between demonstration and downstream tasks (Levine et al., 2020). Some works propose to have agents induce casual abstractions (Majumder et al., 2024), single-state guidelines (Fu et al., 2024), or multi-step procedural workflows (Sarch et al., 2024; Wang et al., 2024b) as a form of intermediate knowledge to augment agent memory via non-parametric approaches (Brown et al., 2020). Nonetheless, most existing approaches represent this knowledge in text, offering limited quality and verification guarantees. In this work, we propose that executable programs are effective representations for intermediate skill acquisition, given their verifiability and composability advantages (Setlur et al., 2025)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.843, + 0.828, + 0.903 + ], + "angle": 0, + "content": "We present ASI, namely agent skill induction (§2), that induces and applies programmatic skills along the process of solving user web navigation queries. More concretely, given a natural language (NL) query, the agent first generates an action trajectory attempting to solve the task using built-in, primitive actions such as click and scroll. 
The agent then" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.909, + 0.542, + 0.926 + ], + "angle": 0, + "content": "1https://github.com/zorazrw/agent-skill-induction" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.276, + 0.061, + 0.725 + ], + "angle": 270, + "content": "arXiv:2504.06821v2 [cs.CL] 29 Aug 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.162 + ], + "angle": 0, + "content": "induces higher-level skills (e.g., search_product(name)) that wrap primitive actions or prior skills as executable programs, accompanied with corresponding test trajectories to verify their quality. Verified skills are then incorporated into the agent action space and can be directly called to solve future tasks with similar procedures, as depicted in Figure 1 (bottom)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.456, + 0.46 + ], + "angle": 0, + "content": "We first evaluate ASI on the WebArena benchmark (Zhou et al., 2024a) (§3) and demonstrate that our online, adaptive ASI surpasses its static agent baseline without adaptive components by \\(23.5\\%\\) in success rate. To validate the advantage of using programmatic representations for skills, we further compare to an adaptive agent, AWM (Wang et al., 2024b), that represents skills in memory as non-executable texts (Figure 1 top); we find ASI scores \\(11.3\\%\\) higher success rate by employing verifiable, programmatic skills (Figure 1 bottom). 
Beyond the correctness aspect, the task-solving procedures by ASI-supported agents are \\(10.7 - 15.3\\%\\) more efficient than the baseline approaches, mainly because of the action space abstraction and composition enabled by the programmatic skill representation." + }, + { + "type": "image", + "bbox": [ + 0.465, + 0.169, + 0.822, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.411, + 0.825, + 0.456 + ], + "angle": 0, + "content": "Figure 1: Online adaptive agent that induces and reuses programmatic skills as actions (bottom), as opposed to adding textual skills in memory (top)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.465, + 0.827, + 0.552 + ], + "angle": 0, + "content": "We further stress test ASI on scaled-up scenarios (S4) that require substantially longer-horizon trajectories to complete the task. Across various domains such as shopping and social forums, we find the efficiency advantage offered by ASI is more pronounced, reducing action trajectories by 9.5 and 5.6 average steps compared to static and text-form agents. Facilitated by this improved procedural efficiency and planning, we find that ASI agent adheres to the optimal trajectory better and achieves tasks \\(20.7 - 38.9\\%\\) more correctly." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.556, + 0.829, + 0.643 + ], + "angle": 0, + "content": "Finally, we study agent behaviors on generalizing induced skills to other websites (§5), particularly from sandboxed, simulated websites to real-world websites of similar domains. While ASI agents effectively transfer common skills (e.g., search_product) to new websites, some skills may be incompatible with alternative website designs hence less useful. Nonetheless, ASI can quickly refine its prior skills or create new skills on new websites, indicating it allows agents to adapt online while maintaining verifiability via programs." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.646, + 0.825, + 0.677 + ], + "angle": 0, + "content": "In short, ASI enhances web agent success and efficiency by inducing and applying verifiable programmatic skills, in general and longer-horizon tasks, even across varied websites." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.695, + 0.403, + 0.714 + ], + "angle": 0, + "content": "2 Agent Skill Induction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.723, + 0.827, + 0.767 + ], + "angle": 0, + "content": "In this section, we first lay out the web agent problem setup (§2.1) and introduce online, self-adaptive agents (§2.2). We then describe the core component of ASI—programmatic skill induction and verification (§2.3)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.783, + 0.542, + 0.8 + ], + "angle": 0, + "content": "2.1 Problem Statement: Online Adaptive Agent" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.805, + 0.825, + 0.892 + ], + "angle": 0, + "content": "For the scope of this work, we focus on language model (LM) based agents, where each agent policy consists of an LM backbone \\(\\mathcal{L}\\), a memory \\(\\mathcal{M}\\), and a skill library \\(\\mathcal{A}\\), as illustrated in Figure 1 top and bottom. In the implementation, the memory \\(\\mathcal{M}\\) and the skill library \\(\\mathcal{A}\\) are provided as input context to the LM backbone. We denote the agent policy as \\(\\pi_{\\mathcal{L}}(\\cdot|\\mathcal{M},\\mathcal{A})\\) and \\(\\pi_{\\mathcal{L}}\\) for short. We focus on the web browser environment defined by a transition function \\(\\mathcal{T}(s'|s,a)\\) that models the change in the webpage after an action." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.825, + 0.927 + ], + "angle": 0, + "content": "We focus on an online adaptation scenario where we have access to a sequence of NL queries \\( Q = \\{q_{1}, q_{2}, \\dots, q_{N}\\} \\) specifying the tasks, and no other information such as demonstration" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.258 + ], + "angle": 0, + "content": "trajectories or ground-truth rewards are available (Wang et al., 2024a,b). For each task specified by a natural language (NL) query \\( q \\), the agent generates a trajectory of actions \\( \\tau = (s_0, a_0, s_1, a_1, \\dots, s_{H-1}, a_{H-1}, s_H) \\) for a finite number of \\( H \\) steps. At each time step \\( h \\) in the horizon, the agent receives observation \\( o_h \\) from the current state \\( s_h \\), and generates an action \\( a_h \\in \\mathcal{A} \\) based on the observations and actions so far, via \\( \\pi_{\\mathcal{L}}(o_{0:h}, a_{0:h-1}; \\mathcal{M}, \\mathcal{A}) \\to a_h \\). The generated action will be executed on the environment and incurs a state change \\( \\mathcal{T}(s_h, a_h) \\to s_{h+1} \\). This observe-act loop continues for \\( H \\) steps until the task reaches a task-terminating condition, such as the agent generating a termination action (e.g., send msg_to_user) or the horizon reaches a pre-determined maximum number of steps \\( h = H_{max} \\). We denote each pair of query and trajectory \\( (q, \\tau) := e \\) as an episode \\( e \\). Agents can update the content in \\( \\mathcal{M} \\) and \\( \\mathcal{A} \\) and reuse them across episodes." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.277, + 0.403, + 0.292 + ], + "angle": 0, + "content": "2.2 Inducing Reusable Skills" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.3, + 0.825, + 0.388 + ], + "angle": 0, + "content": "To realize online adaptive agents, one common approach is to induce skills from correct trajectories to update the agent (Wang et al., 2024b). But since ground-truth rewards are unavailable, an LLM-based evaluator \\( V_{\\mathcal{L}} \\) is often used to judge the correctness of episodes. Formally, from the total of \\( N \\) episodes throughout the online process \\( \\{e^1,\\dots ,e^N\\} \\coloneqq \\mathcal{E} \\), we employ an LM-based evaluator \\( V_{\\mathcal{L}}(e)\\to 0 / 1 \\) to filter out the episodes predicted as correct \\( \\mathcal{E}_V = \\{e_i\\in \\mathcal{E}|V_{\\mathcal{L}}(e_i) = 1,i\\in \\{1,\\dots ,N\\} \\} \\) and perform skill induction only on \\( \\mathcal{E}_V \\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.393, + 0.827, + 0.507 + ], + "angle": 0, + "content": "Central to our adaptive agents is an induction component \\( I \\) that enables the adaptivity of agents, which can be rule-based (Ellis et al., 2023; Grand et al., 2024) or instantiated by an LM \\( I(\\cdot |LM) \\) (Wang et al., 2024b); we follow the latter for its better performance and use \\( I \\) to represent the module for simplicity. For online adaptive agents \\( \\pi_{\\mathcal{L}} \\), to induce skills, \\( I \\) is instructed to take in one filtered episode \\( e \\) and output one or more pieces of desired skills \\( D = \\{d\\} \\), denoted as \\( I(e)\\to \\mathcal{D} \\). Following AWM (Wang et al., 2024b), we update the agent in non-parametric ways that incorporate the induction outcome \\( I(e_t)\\rightarrow d_t \\) into the agent, instead of updating the parameters of the underlying LM backbone \\( \\mathcal{L} \\) for agent policy \\( \\pi_{\\mathcal{L}} \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.511, + 0.825, + 0.569 + ], + "angle": 0, + "content": "Unlike AWM which represents skills in free-form text representations and can only augment agent memory via \\(\\mathcal{M}_t \\cup \\{d_t\\} \\to \\mathcal{M}_{t+1}\\) (Figure 1 top), we introduce ASI that represents skills as executable python programs, and directly integrate skills into the agent action space instead, via \\(\\mathcal{A}_t \\cup \\{d_t\\} \\to \\mathcal{A}_{t+1}\\) (Figure 1 bottom)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.587, + 0.544, + 0.603 + ], + "angle": 0, + "content": "2.3 Inducing and Verifying Programmatic Skills" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.61, + 0.825, + 0.681 + ], + "angle": 0, + "content": "To improve the induction quality, we propose a change in representation from free-form text to executable programs, which offers advantages in correctness and efficiency. For one, the program format enables ready verification on skill correctness by executing them; for another, skill programs abstract multiple lower-level actions into a higher-level function call, thus agents can solve tasks in fewer steps without tackling tricky low-level details." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.687, + 0.827, + 0.786 + ], + "angle": 0, + "content": "Inducing Programmatic Skills We first clean the input episodes to ensure the induction quality. We remove all the steps that cause execution errors such as invalid argument format, to keep these invalid actions from distracting agent predictions. Furthermore, noticing the long and possibly redundant thought process generated by agents along with each action, we simplify each thought text paragraph into a short one-sentence description (e.g., \"Clicked the directions button to access the route planning feature\") using LM, effectively reducing the thought content from 87.9 to 13.4 tokens per step." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.791, + 0.827, + 0.863 + ], + "angle": 0, + "content": "Given a clean input episode \\( e \\), we now prompt the induction module \\( I \\) to produce one or more program functions to represent reusable skills \\( \\mathcal{D} = \\{d\\} \\) as executable programs. As exemplified in Figure 2, given the input episode on the left side, the induction module first produces two skills open网络营销reviews() and searchreviews_search_box_id, search_button_id, search_term) in the form of callable program functions." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Skill Verification With the programmatic nature of ASI's skills, we can readily verify their correctness by executing them and checking if tasks can be solved successfully. While a naive way is to query the agent with the same NL query and allow it to use newly induced skill actions, we find agents may not always use new skills due to the large search space of" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.195, + 0.087, + 0.805, + 0.23 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.191, + 0.235, + 0.807, + 0.253 + ], + "angle": 0, + "content": "Figure 2: Inducing programmatic skills and rewriting the trajectory from an episode." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.27, + 0.828, + 0.495 + ], + "angle": 0, + "content": "possible action trajectories. 
To have agents more efficiently generate trajectories that test skills in a more targeted way, we curate a rewritten trajectory prefix \\(\\tau_{D}\\) to constrain the first few steps executed in the environment, by rewriting and truncating the input action trajectory \\(\\tau\\), and subsequently asking the agent to complete the prefix to get a full, checkable trajectory \\(\\tau_{f}\\). Concretely, we first take the original action trajectory in the input episode \\(\\tau\\) (consisting of primitive actions or previously learned skills), and ask the induction module \\(I\\) to transform it to a skill-using trajectory (Figure 2 bottom right), by replacing sub-trajectories in \\(\\tau\\) with calls to the newly induced skill programs \\(\\mathcal{D}\\), if possible. Zooming into the Figure 2 example, this procedure merges click('Marketing') \\(\\rightarrow\\) click('All Reviews') to an open_marketreviews() call; transforms fill(757, 'satisfied') \\(\\rightarrow\\) click('Search') to a call of the second skill searchreviews('satisfied') with the specified term 'satisfied'; and adopted the last send msg_to_user('2') step directly. Note that we follow Wang et al. (2024b) and induce skills according to each website, so some skills could be tailored to particular webpage contexts such as the 'Marketing' and 'All Reviews' link constants in open_marketreviews, while other skills apply to more versatile setups such as searching for different reviews in searchreviews." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.501, + 0.825, + 0.601 + ], + "angle": 0, + "content": "Next, to avoid spurious successes in skill verification, we truncate the trajectory yielded above by removing any trailing primitive actions after the last call to a skill program. Taking Figure 2 as an example, in the original input trajectory, the last send msg_to_user('2') already sends over the correct answer '2' to the user. 
However, if we directly adopt this last step into the skill-using trajectory \\(\\tau_{D}\\), then executing it will always return the correct message to the user, regardless of whether the previous skill calls are valid. We thus remove such trailing actions to make sure verification attends to the induced skills we are testing." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.606, + 0.827, + 0.722 + ], + "angle": 0, + "content": "After rewriting and truncation, we get this skill-using trajectory \\(\\tau_{D}\\) as the prefix to test skills. We now query the agent \\(\\pi\\) again with the same NL query \\(q\\) and first execute \\(\\tau_{D}\\) on the environment. We then allow agents to continue generating up to \\(H_{max} - |\\tau_{D}|\\) actions to finish the task. In the Figure 2 example, to successfully solve query \\(q\\), we expect the agent to generate another step of send msg_to_user(...). with the correct answer '2' in the message. We take the concatenation of the trajectory prefix \\(\\tau_{D}\\) and the later additionally produced new steps \\(\\tau_{A}\\) (e.g., [send msg_to_user('2')]) as the full trajectory \\(\\tau_{f}\\). We then decide whether to add the induced programs \\(\\mathcal{D}\\) into the agent skill library as \\(\\mathcal{A}\\) by examining \\(\\tau_{f}\\)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.727, + 0.828, + 0.817 + ], + "angle": 0, + "content": "Specifically, we check \\(\\tau_{f}\\) from three dimensions: (1) Correctness: if executing \\(\\tau_{f}\\) successfully solves the task \\(q\\) as judged by the neural model evaluator \\(V_{\\mathcal{L}}\\); (2) Skill Usage: if the trajectory contains at least one call to at least one new skill in \\(\\mathcal{D}\\); and (3) Skill Validity: if all skill-calling actions cause environment changes. 
If all three boxes are checked, we add the skills being called in the trajectory \\(\\tau_{f}\\) to the agent skill library \\(\\mathcal{A}_{t} \\cup \\mathcal{D}_{\\text{called}} \\rightarrow \\mathcal{A}_{t+1}\\). By adding \\(\\mathcal{D}_{\\text{called}}\\), the agent can now generate actions that call these skill programs to solve subsequent tasks." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.837, + 0.547, + 0.856 + ], + "angle": 0, + "content": "3 General Web Navigation Performance" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.87, + 0.348, + 0.885 + ], + "angle": 0, + "content": "3.1 Experiment Setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.896, + 0.826, + 0.927 + ], + "angle": 0, + "content": "Benchmark and Evaluation To evaluate ASI on general web navigation scenarios, we adopt the WebArena benchmark (Zhou et al., 2024a) that contains 812 test examples covering" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.179, + 0.09, + 0.817, + 0.189 + ], + "angle": 0, + "content": "
ModelMethod# StepsSRShopAdminRedditGitLabMapsMulti
GPTVanilla-12.313.910.46.615.015.68.3
AWM5.935.532.129.154.735.042.218.8
ClaudeVanilla5.632.732.636.836.826.138.520.8
AWM5.936.334.839.051.928.939.418.8
ASI (ours)5.040.440.144.054.732.243.120.8
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.197, + 0.825, + 0.227 + ], + "angle": 0, + "content": "Table 1: WebArena success rate by adaptive agents with programmatic skills, in comparison to a static vanilla agent baseline, and a text-skill learning adaptive agent." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.242, + 0.827, + 0.327 + ], + "angle": 0, + "content": "five major web activity domains: e-commerce, social forum, software development, content management, and travel. Each example in WebArena has an NL query \\( q \\) for the task, and a program-based evaluator that provides a binary \\( 0/1 \\) score for any given trajectory \\( \\tau \\) to judge if it successfully solves the task \\( q \\). This program-based evaluator enables relatively rigorous evaluation based on the functional correctness of the action trajectory. We report the average score across all WebArena examples, if not specified otherwise." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.333, + 0.828, + 0.405 + ], + "angle": 0, + "content": "Backbone LM and Agent Architecture We use the top-performing claude-3.5-sonnet model as the LM backbone for all components, including the agent policy \\(\\pi\\), the neural evaluator \\(V\\), and the skill induction modules \\(I\\). For experimentation, we use the BrowserGym (Chezelles et al., 2024) framework, which takes the webpage accessibility tree as observation, and instantiates the skill library \\(\\mathcal{A}\\) with the WebArena default action space listed in \\(\\S A\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.41, + 0.827, + 0.522 + ], + "angle": 0, + "content": "Baselines We take the vanilla Claude model with the BrowserGym framework (Drouin et al., 2024) as the non-adaptive agent baseline. Additionally, we compare ASI to AWM (Wang et al., 2024b), the current top-performing online adaptive web agent method. 
Because AWM was originally developed with the gpt-4o model, for a fairer comparison, we also experiment with AWM with claude-3.5-sonnet model as its LM backbone and also apply the episode cleaning procedure to enhance induction quality. We compare the two baseline methods with our ASI approach. We provide the complete prompts for each agent component: task-solving, episode evaluation, episode cleaning, and skill induction, in SA." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.54, + 0.373, + 0.555 + ], + "angle": 0, + "content": "3.2 Results and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.566, + 0.827, + 0.693 + ], + "angle": 0, + "content": "In Table 1, compared to the vanilla static-agent baseline, adaptive agents (AWM and ASI) generally achieve \\(11.0 - 23.5\\%\\) higher success rates overall. Among adaptive agents, our ASI with programmatic skills, achieves another \\(11.3\\%\\) success rate gain across websites, compared to its AWM counterpart that induces and uses textual skills. Meanwhile, ASI offers additional efficiency benefits by reducing the number of steps in solutions by \\(15.3\\%\\) and \\(10.6\\%\\) than vanilla and AWM agents, as one skill-call action can often execute multiple steps written in primitive actions used by vanilla and AWM agents. These advantages in correctness and efficiency are exhibited prominently across different websites and tasks, as shown by the website breakdown on Table 1 (right). Refer to \\(\\S B\\) for more analysis." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.71, + 0.488, + 0.725 + ], + "angle": 0, + "content": "3.3 Why are Programmatic Skills Better?" 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.827, + 0.793 + ], + "angle": 0, + "content": "To more concretely answer why programmatic skills are more effective than textual skills, we take a closer look on the two main differences between AWM and ASI: [1] whether the induction outcome is verified via execution, and [2] whether the induced skills are provided in memory for reference purpose only, or in the action space that allows execution." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.8, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Better Induction Quality We take the shopping website as a representative, and analyze the textual and program skills induced by AWM and ASI agents. We group textual and program skills by their functionality and show one representative example in Table 2. Compared to the clear functional boundary and highly-reusable granularity of the search_product skill, we find that the textual skills often have (1) more redundant steps, (2) example-specific context: e.g., the last text skill aims to find 'game accessories' while the steps generally apply to any product, and (3) fuzzier boundaries between separable tasks, e.g., the first skill mixes product-search and add-to-wishlist procedures, thus may not offer optimal guidance when asked to, e.g., search product and add it to cart instead." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.089, + 0.824, + 0.393 + ], + "angle": 0, + "content": "
Programmatic SkillsTextual Skills
def search_product_search_box_id: str, query: str):\n ""Search for a product using the search box.\n Args:\n search_box_id: ID of the search input field\n query: Search query string to enter\n Returns:\n None\nExamples:\n search_product('595', 'sony bluetooth headphones')\n ""\nclick_search_box_id)\nfill_search_box_id, query)\nkeyboard_press('Enter'))Task: Search for a product and add it to wish list\nAction Trajectory:\n __fill(621, {product_name})__ # Enter the product\nname in the search box\n __click(478)__ # Click the search button to execute the\nsearch\n __click({product_link})__ # Click the product to check\nmore details\n __click(1769)__ # Click the "Add to Wish List" link
Task: Search for a product's price range in the store\nAction Trajectory:\n __fill(565, {product_name})__ # Enter the product\nname in the search box\n __click(570)__ # Click the search button to execute the\nsearch\n __noop(1000)__ # Wait for search results to load\n __send msg_to_user({price_range_info})__ # \nAnalyze and report the price range findings from the\nsearch results
Task: Search for gaming accessories within a date range\nAction Trajectory:\n __click(1274)__ # Navigate to the Video Games category\n __fill(473, {search_terms})__ # Enter search terms\nincluding product name and year\n __click(478)__ # Execute the search
" + }, + { + "type": "table_caption", + "bbox": [ + 0.214, + 0.4, + 0.782, + 0.417 + ], + "angle": 0, + "content": "Table 2: Example textual and program skills induced on the shopping website." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.435, + 0.828, + 0.633 + ], + "angle": 0, + "content": "Verified Induction Improves End Success Rate From qualitative examination of the induction outcomes, we find roughly similar numbers of episodes evaluated as correct and used for induction (70 and 58 examples for AWM and ASI), ASI produced programs pass verification for only \\(15.6\\%\\) of the turns, whereas AWM adds new skills for \\(31.4\\%\\) of the time (replace or add none otherwise). While skill usage (in memory or as action, [2]) is designated for AWM and ASI, we hypothesize that verification [1] affects induction quality and thus end success. We thus experiment with another setting that induces programs (such that verification is enabled), and only use the induced skills in memory, to study the importance of induction quality. As shown in Table 3, inducing skills with execution-based verification (i.e., (unverified, text) \\(\\rightarrow\\) (verified, program)), while always present skills in memory, improves end success rate by 4.2 points, indicating the importance of higher-quality induction via verification. Yet it is still 3.7 points lower than ASI, suggesting the incompatibility of program format to agent memory. Indeed, we observe many cases where the agent tries to call the skill programs but unsuccessfully, since they are not supported in the action space." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.638, + 0.486, + 0.776 + ], + "angle": 0, + "content": "Textual Representations Suit Memory Better To prevent the agent from trying to call these plausible programs, we ablate another setting that transforms program skills to textual format (as Table 2 right) and provide them in agent memory, dubbed (verified, text). 
This format transformation effectively improves the overall success rate by another 2.6 points, getting a little closer to ASI. Given the different downstream usage," + }, + { + "type": "table", + "bbox": [ + 0.498, + 0.642, + 0.822, + 0.733 + ], + "angle": 0, + "content": "
MethodSR
Add to Memoryunverified, text32.6
verified, program36.4
verified, text39.0
Add as Actionsverified, program40.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.495, + 0.737, + 0.828, + 0.767 + ], + "angle": 0, + "content": "Table 3: Ablation study on induction verification and format on the shopping website." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.777, + 0.827, + 0.792 + ], + "angle": 0, + "content": "i.e., memory or actuation, textual and program formats may suit individual scenarios better." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.797, + 0.826, + 0.827 + ], + "angle": 0, + "content": "Beyond basic web navigation tasks, in the next two sections, we examine agents in two other important scenarios, scaled-up activities (§4) and cross-website generalization (§5)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.849, + 0.482, + 0.868 + ], + "angle": 0, + "content": "4 Scaled-Up Browsing Activities" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.882, + 0.826, + 0.927 + ], + "angle": 0, + "content": "The WebArena benchmark mainly features isolated, single-task scenarios, such as adding a single product to the shopping cart. However, in real-world practices, people need to do a series of such tasks together, such as adding multiple related products (e.g., coffee and" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "mug) to the cart before finally checking out. This browsing request can lead to extremely long-horizon tasks, sometimes with repetitive intermediate procedures. We identify this to be a scenario to further demonstrate the efficacy of program skills, as opposed to textual skills, as programs lend themselves naturally to repeated invocation and composition." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.827, + 0.266 + ], + "angle": 0, + "content": "Therefore, we curate several case scenarios where the user asks for action-dense instructions, such as the tasks listed in Figure 3. Because the tasks are long-horizon and involve multiple sub-tasks, we follow Xu et al. (2024) and set up intermediate checkpoints to better track the intermediate progress of agents. Refer to SC.1 to see the full list of tasks and their evaluation checkpoints. We measure the success rate of each example by the percentage of checkpoints achieved by the agent. We report the average success rate of all examples, as well as the average number of steps taken to solve the tasks, in Table 4." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.275, + 0.825, + 0.348 + ], + "angle": 0, + "content": "
MethodShoppingAdminRedditGitLabMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
VANILLA41.723.558.020.833.323.033.340.040.015.2
AWM68.321.574.018.240.016.850.033.865.012.6
ASI (ours)100.016.391.014.255.012.855.025.4100.06.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.355, + 0.827, + 0.4 + ], + "angle": 0, + "content": "Table 4: Performance of vanilla, AWM, and ASI agents in scaled-up browsing scenarios. We perform statistical testing between ASI and each baseline and verify all improvements are statistically significant with t-statistics \\( |t| > 2 \\) and \\( p < 0.05 \\); see SC.3 for more details." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.826, + 0.47 + ], + "angle": 0, + "content": "ASI Features Improved Efficiency Across all websites, ASI-produced trajectories have 6.6-14.6 and \\(4.0 - 8.4\\%\\) fewer steps, compared to vanilla and AWM baselines, respectively. As the task horizon continues to grow when involving more intermediate checkpoints, this margin between ASI and baselines will predictably be more prominent." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.475, + 0.825, + 0.547 + ], + "angle": 0, + "content": "Subsequent Benefits in Success Rate ASI also achieves higher success rates with more efficient trajectories, outperforming vanilla and AWM baselines by \\(38.9\\%\\) and \\(20.7\\%\\) on average. From manual analysis, we find this improvement comes from easier, better agent planning when using higher-level skills, without the need to tackle more complex procedures if only low-level primitive actions are available, as with vanilla and AWM agents." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.827, + 0.624 + ], + "angle": 0, + "content": "Case Study: Changing Multiple Addresses We present a representative case on the shopping website: changing billing and shipping addresses after moving. As depicted in the top row in Figure 3, the vanilla agent without adaptive skills often roams into some irrelevant exploration steps, instead of sticking to the optimal route to solve the required task. It runs for minutes and exhausts the maximum steps (i.e., 50) before finishing the task." 
+ }, + { + "type": "image", + "bbox": [ + 0.174, + 0.629, + 0.827, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.182, + 0.909, + 0.816, + 0.927 + ], + "angle": 0, + "content": "Figure 3: Example scaled-up task of updating multiple addresses on shopping website." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.103, + 0.827, + 0.162 + ], + "angle": 0, + "content": "With AWM, adding textual skills in memory provides soft guidelines for agents to follow, the agent thus better sticks to the goal and finishes each part of the task (i.e., navigate to the address page, update billing/shipping address) step by step. Although successful, the trajectory is long, i.e., 27 steps, and still takes a few minutes to finish." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.167, + 0.825, + 0.225 + ], + "angle": 0, + "content": "In comparison, ASI (in Figure 3 bottom row) showcases its efficiency by using learned skills to navigate_to_address_settings and update_address_details can solve each part in one step (vs. the 3-6 steps used by AWM for these parts). Overall, ASI correctly finishes all required actions in only 4 steps, shortening the horizon by \\(85.2\\%\\) compared to AWM." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.247, + 0.442, + 0.267 + ], + "angle": 0, + "content": "5 Adapting Across Websites" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.281, + 0.422, + 0.392 + ], + "angle": 0, + "content": "To examine whether agents can generalize with learned skills, we test agents on real-world website counterparts for some of the domains in WebArena as listed in Table 5. 
This experiment setup can reflect on (1) transfer across different websites of the same domain," + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.392, + 0.658, + 0.406 + ], + "angle": 0, + "content": "and (2) transfer from simulated, sandboxed to real-world websites." + }, + { + "type": "table", + "bbox": [ + 0.436, + 0.283, + 0.818, + 0.35 + ], + "angle": 0, + "content": "
DomainWebArena SandboxedReal-World
shoppingOneStopMarketTarget
online forumPostMillReddit
travelOpenStreetMapGoogle Maps
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.43, + 0.353, + 0.825, + 0.381 + ], + "angle": 0, + "content": "Table 5: Real-world in-domain website counterparts to each WebArena sandboxed website." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.412, + 0.827, + 0.485 + ], + "angle": 0, + "content": "For each sandbox-real website pair, we take ten information-seeking style queries (He et al., 2024) in WebArena that do not involve potential privacy leakage or unrecoverable risky actions, such as making a purchase or changing user password. We provide the task details in §C.2. We compare ASI and AWM with their programmatic and textual skills as learned in §3, as well as comparing to the vanilla static agent baseline." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.49, + 0.388, + 0.575 + ], + "angle": 0, + "content": "Transferring Common Skills In Figure 4, we can see how ASI can effectively reuse common skills such as search_product in the first step on the Target website." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.58, + 0.391, + 0.707 + ], + "angle": 0, + "content": "Incompatible Skills One challenge faced by ASI is that some prior skills become incompatible on the new website. For example, the sort_by Listings() induced on OneStopMarket selects options from a dropdown menu, yet sorting on" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.706, + 0.825, + 0.749 + ], + "angle": 0, + "content": "the Target website opens a sidebar; despite their semantic similarity, the concrete actions in skill programs are no longer applicable. Still, we find that agents can often spot this incompatibility and rarely attempt to use these deprecated skills." 
+ }, + { + "type": "image", + "bbox": [ + 0.4, + 0.496, + 0.818, + 0.661 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.398, + 0.669, + 0.825, + 0.699 + ], + "angle": 0, + "content": "Figure 4: ASI can generalize the search_product skill but face incompatibility when sorting items." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.754, + 0.825, + 0.77 + ], + "angle": 0, + "content": "Adapting Skills to New Environment Although some skills induced on previous websites" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.37, + 0.895 + ], + "angle": 0, + "content": "cannot be directly used on arbitrary new websites, we hypothesize that these skills can still serve as informative references on solving procedurally similar tasks or composing new skills targeted for the new website design." + }, + { + "type": "table", + "bbox": [ + 0.384, + 0.773, + 0.818, + 0.871 + ], + "angle": 0, + "content": "
MethodShoppingRedditMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
Vanilla80.05.440.04.863.37.4
AWM80.05.056.74.8100.06.2
ASI90.03.476.74.493.34.4
AWM + update80.05.463.35.8100.07.2
ASI + update90.03.276.74.093.34.2
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.379, + 0.875, + 0.827, + 0.903 + ], + "angle": 0, + "content": "Table 6: Cross-website results. ASI significantly surpasses baselines in sr and # steps (with \\( |t| > 2 \\) and \\( p < 0.05 \\)) from our analysis in §C.3." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.824, + 0.925 + ], + "angle": 0, + "content": "2We did not test on administrative and software websites given their more severe safety concerns." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.829, + 0.177 + ], + "angle": 0, + "content": "We thus allow agents to induce new skills or update previously acquired skills from experiences on the new website, denoted as +update entries in Table 6. We find that enabling skill update in both textual and program formats helps agent performance on new websites. Within the short online learning process (tens of examples), AWM adapts faster to the new websites, while ASI sees a more pronounced improvement in efficiency." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.197, + 0.33, + 0.213 + ], + "angle": 0, + "content": "6 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.23, + 0.828, + 0.427 + ], + "angle": 0, + "content": "Adaptive Digital Agents An important thread of agent-improving methods is to build adaptive agents that can autonomously self-improve from experiences. 
Most works focus on integrating past experiences into agent memory by collecting human annotation (Deng et al., 2024) or LM-based synthesis (Ou et al., 2024; Xu et al., 2025), especially via agent-driven exploration with instruction- (Murty et al., 2024b) or trajectory-driven (Murty et al., 2024a) approaches, offering warm starts on the websites of interest. Other works gather experiences (Wang et al., 2024b) or feedback (Qu et al., 2024) during test time, and augment them into memory through parametric channels such as supervised fine-tuning (Murty et al., 2024a), contrastive learning (Song et al., 2024), or reinforcement learning (Zhou et al., 2024b). Meanwhile, non-parametric approaches can directly augment reflections (Shinn et al., 2023), raw past experiences (Wang et al., 2023; Zheng et al., 2023), or further induced reusable workflows (Wang et al., 2024b). While these adaptive agents learn textual skills stored in memory, our ASI stores skills as verifiable and composable programs in the agent action space (i.e., skill library), thus enabling better quality and efficiency." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.431, + 0.829, + 0.613 + ], + "angle": 0, + "content": "Skill Discovery and Learning Learning specialized skills for tasks in programmatic (Shin et al., 2019; Ellis et al., 2023; Cai et al., 2024; Wang et al., 2024a; Grand et al., 2024), embodied (Sharma et al., 2022; Wang et al., 2023; Liang et al., 2023; Sarch et al., 2024; Wong et al., 2024), and physical (Yu et al., 2023) environments has shown to success in agent performance. Particularly for digital agents built for web navigation tasks, most works focus on exploring skills offline with RL roll-outs (Gur et al., 2018; Liu et al., 2018; Putta et al., 2024; Qi et al., 2024) or LM-based prompting (Zhou et al., 2024b; Murty et al., 2024a; Patel et al., 2024). 
While this exploration stage could offer some supervised data to update the agent policy either parametric (Murty et al., 2024a; Patel et al., 2024) or non-parametrically (Zheng et al., 2023; Murty et al., 2024b), it often costs enormous extra computation and may suffer from the lack or mismatch in distribution with the downstream tasks at hand (Wang et al., 2024b). In contrast, our ASI does not rely on supervised data and can directly learn skills online without prior exploration." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.619, + 0.829, + 0.731 + ], + "angle": 0, + "content": "Web Navigation Benchmarks Digital agents have been explored across a wide range of tasks (Yao et al., 2024; Kapoor et al., 2025; Xie et al., 2024b), among which one of the most popular application being browsing and navigating through versatile websites such as shopping (Yao et al., 2022), social media communication (Zhou et al., 2024a; Koh et al., 2024), knowledge work tasks (Drouin et al., 2024), and more (Deng et al., 2024). Our work focuses on general web navigation tasks using the WebArena (Zhou et al., 2024a) benchmark, meanwhile exploring other challenging scenarios such as scaled-up activities (Yoran et al., 2024) and cross-domain generalization (Deng et al., 2024)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.753, + 0.525, + 0.769 + ], + "angle": 0, + "content": "7 Conclusion and Future Discussions" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.785, + 0.829, + 0.927 + ], + "angle": 0, + "content": "In this work, we present ASI to support web navigation agents to autonomously induce, verify, learn, and apply programmatic skills during online inference. Beyond achieving \\(23.5\\%\\) success rate and \\(15.3\\%\\) efficiency increases in general web tasks, we also showcase ASI's strengths for scaled-up web activities, thanks to the high-level action interface offered by the programmatic abstraction. 
Moreover, we examine skill generalizability to new, real-world websites, and find ASI still offers great efficiency while flexibly updating skills to new environments. While our work aims to offer insights on the optimal representation in agent skill acquisition, we still find multiple pieces in ASI worthy of further investigation, such as the conceptually or empirically suitable granularity of skills, the stability of the online evolving process, and the skill quality in comparison to human expert desiderata." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.347, + 0.12 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.827, + 0.193 + ], + "angle": 0, + "content": "We would like to thank Jiayuan Mao, Yueqi Song, Boyuan Zheng, and Yu Su for the insightful discussions. We thank Yiqing Xie, Xinran Zhao, and Mingqian Zheng for their helpful comments on the paper draft. Zora is supported by the CMU Presidential Fellowship and Fujitsu Research. Apurva is supported by Amazon." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.212, + 0.276, + 0.228 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.237, + 0.827, + 0.379 + ], + "angle": 0, + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bfcb4967418bf8ac142f64a-Paper.pdf." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.387, + 0.829, + 0.433 + ], + "angle": 0, + "content": "Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qV83K9d5WB." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.44, + 0.828, + 0.486 + ], + "angle": 0, + "content": "De Chezelles, Thibault Le Sellier, Maxime Gasse, Alexandre Lacoste, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, et al. The browsergym ecosystem for web agent research. arXiv preprint arXiv:2412.05467, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.493, + 0.826, + 0.538 + ], + "angle": 0, + "content": "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. 
Advances in Neural Information Processing Systems, 36, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.547, + 0.828, + 0.605 + ], + "angle": 0, + "content": "Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, et al. Workarena: How capable are web agents at solving common knowledge work tasks? arXiv preprint arXiv:2403.07718, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.613, + 0.828, + 0.672 + ], + "angle": 0, + "content": "Kevin Ellis, Lionel Wong, Maxwell Nye, Mathias Sable-Meyer, Luc Cary, Lore Anaya Pozo, Luke Hewitt, Armando Solar-Lezama, and Joshua B Tenenbaum. Dreamcoder: growing generalizable, interpretable knowledge with wake-sleep bayesian program learning. Philosophical Transactions of the Royal Society A, 381(2251):20220050, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.68, + 0.828, + 0.739 + ], + "angle": 0, + "content": "Yao Fu, Dong-Ki Kim, Jaekyeom Kim, Sungryull Sohn, Lajanugen Logeswaran, Kyunghoon Bae, and Honglak Lee. Autoguide: Automated generation and selection of state-aware guidelines for large language model agents. CoRR, abs/2403.08978, 2024. URL https://doi.org/10.48550/arXiv.2403.08978." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.747, + 0.828, + 0.806 + ], + "angle": 0, + "content": "Gabriel Grand, Lionel Wong, Matthew Bowers, Theo X. Olausson, Muxin Liu, Joshua B. Tenenbaum, and Jacob Andreas. LILO: Learning interpretable libraries by compressing and documenting code. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=TqYbAWKMJe." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.814, + 0.825, + 0.845 + ], + "angle": 0, + "content": "Izzeddin Gur, Ulrich Rückert, Aleksandra Faust, and Dilek Hakkani-Tür. Learning to navigate the web. CoRR, abs/1812.09195, 2018. URL http://arxiv.org/abs/1812.09195." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.853, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, 2024. URL https://aclanthology.org/2024.acl-long.371/." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.237, + 0.829, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.826, + 0.162 + ], + "angle": 0, + "content": "Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem AlShikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In European Conference on Computer Vision, pp. 161-178. Springer, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.169, + 0.827, + 0.227 + ], + "angle": 0, + "content": "Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. Visualwebarena: Evaluating multimodal agents on realistic visual web tasks. arXiv preprint arXiv:2401.13649, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.235, + 0.827, + 0.279 + ], + "angle": 0, + "content": "Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.288, + 0.827, + 0.344 + ], + "angle": 0, + "content": "Jacky Liang, Wenlong Huang, Fei Xia, Peng Xu, Karol Hausman, Brian Ichter, Pete Florence, and Andy Zeng. Code as policies: Language model programs for embodied control. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 9493-9500. IEEE, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.354, + 0.825, + 0.399 + ], + "angle": 0, + "content": "Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, and Percy Liang. Reinforcement learning on web interfaces using workflow-guided exploration. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=ryTp3f-0-." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.407, + 0.827, + 0.464 + ], + "angle": 0, + "content": "Bodhisattwa Prasad Majumder, Bhavana Dalvi, Peter Jansen, Oyvind Tafjord, Niket Tandon, Li Zhang, Chris Callison-Burch, and Peter Clark. CLIN: A continually learning language agent for rapid task adaptation and generalization, 2024. URL https://openreview.net/forum?id=d5DGVHMsC." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.473, + 0.827, + 0.517 + ], + "angle": 0, + "content": "Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator, 2024a. URL https://arxiv.org/abs/2410.02907." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.525, + 0.827, + 0.57 + ], + "angle": 0, + "content": "Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. Bagel: Bootstrapping agents by guiding exploration with language, 2024b. URL https://arxiv.org/abs/2403.08140." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.578, + 0.827, + 0.65 + ], + "angle": 0, + "content": "Tianyue Ou, Frank F. Xu, Aman Madaan, Jiarui Liu, Robert Lo, Abishek Sridhar, Sudipta Sengupta, Dan Roth, Graham Neubig, and Shuyan Zhou. 
Synatra: Turning indirect knowledge into direct demonstrations for digital agents at scale. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=KjNEzWRIqn." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.658, + 0.827, + 0.716 + ], + "angle": 0, + "content": "Yichen Pan, Dehan Kong, Sida Zhou, Cheng Cui, Yifei Leng, Bing Jiang, Hangyu Liu, Yanyi Shang, Shuyan Zhou, Tongshuang Wu, and Zhengyang Wu. Webcanvas: Benchmarking web agents in online environments. In Agentic Markets Workshop at ICML 2024, 2024. URL https://openreview.net/forum?id=O1FaGasJob." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.724, + 0.827, + 0.768 + ], + "angle": 0, + "content": "Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.776, + 0.827, + 0.82 + ], + "angle": 0, + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.825, + 0.873 + ], + "angle": 0, + "content": "Zehan Qi, Xiao Liu, Iat Long Iong, Hanyu Lai, Xueqiao Sun, Wenyi Zhao, Yu Yang, Xinyue Yang, Jiadai Sun, Shuntian Yao, et al. Webrl: Training llm web agents via self-evolving online curriculum reinforcement learning. arXiv preprint arXiv:2411.02337, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.827, + 0.924 + ], + "angle": 0, + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve, 2024. URL https://arxiv.org/abs/2407.18219." 
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.103, + 0.827, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.827, + 0.161 + ], + "angle": 0, + "content": "Gabriel Sarch, Lawrence Jang, Michael Tarr, William W Cohen, Kenneth Marino, and Katerina Fragkiadaki. Vlm agents generate their own memories: Distilling experience into embodied programs of thought. Advances in Neural Information Processing Systems, 37: 75942-75985, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.169, + 0.826, + 0.2 + ], + "angle": 0, + "content": "Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or rl is suboptimal. arXiv preprint arXiv:2502.12118, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.207, + 0.827, + 0.264 + ], + "angle": 0, + "content": "Pratyusha Sharma, Antonio Torralba, and Jacob Andreas. Skill induction and planning with latent language. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, May 2022. URL https://aclanthology.org/2022.acl-long.120/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.272, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Eui Chul Shin, Miltiadis Allamanis, Marc Brockschmidt, and Alex Polozov. Program synthesis and semantic parsing with learned code idioms. Advances in Neural Information Processing Systems, 32, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.324, + 0.827, + 0.368 + ], + "angle": 0, + "content": "Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning, 2023. URL https://arxiv.org/abs/2303.11366." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.375, + 0.827, + 0.418 + ], + "angle": 0, + "content": "Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for llm agents, 2024. URL https://arxiv.org/abs/2403.02502." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.427, + 0.825, + 0.471 + ], + "angle": 0, + "content": "Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. Voyager: An open-ended embodied agent with large language models, 2023. URL https://arxiv.org/abs/2305.16291." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.478, + 0.825, + 0.523 + ], + "angle": 0, + "content": "Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=DCNCwamJjI." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.53, + 0.825, + 0.56 + ], + "angle": 0, + "content": "Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.568, + 0.827, + 0.626 + ], + "angle": 0, + "content": "Lionel Wong, Jiayuan Mao, Pratyusha Sharma, Zachary S Siegel, Jiahai Feng, Noa Korneev, Joshua B. Tenenbaum, and Jacob Andreas. Learning grounded action abstractions from language. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qJ0Cfj4Ex9." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.633, + 0.827, + 0.691 + ], + "angle": 0, + "content": "Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. Travelplanner: A benchmark for real-world planning with language agents. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=15XQzNkAOe." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.699, + 0.827, + 0.755 + ], + "angle": 0, + "content": "Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, et al. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. arXiv preprint arXiv:2404.07972, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.764, + 0.825, + 0.809 + ], + "angle": 0, + "content": "Frank F Xu, Yufan Song, Boxuan Li, Yuxuan Tang, Kritanjali Jain, Mengxue Bao, Zora Z Wang, Xuhui Zhou, Zhitong Guo, Murong Cao, et al. Theagentcompany: benchmarking llm agents on consequential real world tasks. arXiv preprint arXiv:2412.14161, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.816, + 0.825, + 0.874 + ], + "angle": 0, + "content": "Yiheng Xu, Dunjie Lu, Zhennan Shen, Junli Wang, Zekun Wang, Yuchen Mao, Caiming Xiong, and Tao Yu. Agenttrek: Agent trajectory synthesis via guiding replay with web tutorials. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=EEgYUccwsV." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.881, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35:20744-20757, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.147 + ], + "angle": 0, + "content": "Shunyu Yao, Noah Shinn, Pedram Razavi, and Karthik Narasimhan. \\(\\tau\\)-bench: A benchmark for tool-agent-user interaction in real-world domains. arXiv preprint arXiv:2406.12045, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.826, + 0.199 + ], + "angle": 0, + "content": "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? arXiv preprint arXiv:2407.15711, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.206, + 0.829, + 0.293 + ], + "angle": 0, + "content": "Wenhao Yu, Nimrod Gileadi, Chuyuan Fu, Sean Kirmani, Kuang-Huei Lee, Montserrat Gonzalez Arenas, Hao-Tien Lewis Chiang, Tom Erez, Leonard Hasenclever, Jan Humplik, brian richter, Ted Xiao, Peng Xu, Andy Zeng, Tingnan Zhang, Nicolas Heess, Dorsa Sadigh, Jie Tan, Yuval Tassa, and Fei Xia. Language to rewards for robotic skill synthesis. In 7th Annual Conference on Robot Learning, 2023. URL https://openreview.net/forum?id= SgTPdyehXMA." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.3, + 0.826, + 0.344 + ], + "angle": 0, + "content": "Longtao Zheng, Rundong Wang, Xinrun Wang, and Bo An. Synapse: Trajectory-as-exemplar prompting with memory for computer control. In The Twelfth International Conference on Learning Representations, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.351, + 0.829, + 0.424 + ], + "angle": 0, + "content": "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.431, + 0.829, + 0.475 + ], + "angle": 0, + "content": "Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Erran Li. Proposer-agent-evaluator (pae): Autonomous skill discovery for foundation model internet agents. arXiv preprint arXiv:2412.13194, 2024b." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.829, + 0.475 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.102, + 0.387, + 0.12 + ], + "angle": 0, + "content": "A Experiment Details" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.137, + 0.367, + 0.154 + ], + "angle": 0, + "content": "A.1 Agent Action Space" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.165, + 0.828, + 0.209 + ], + "angle": 0, + "content": "Table 7 shows the default action space the web navigation agents we employed in all the experiments. This action space remains the same for both (i) static, vanilla agent, as well as the (ii) adaptive agent that learn textual skills in memory, i.e., AWM." + }, + { + "type": "table", + "bbox": [ + 0.24, + 0.218, + 0.756, + 0.452 + ], + "angle": 0, + "content": "
Action TypeDescription
noop(wait_ms)Do nothing for specified time.
click(elem)Click at an element.
hover(elem)Hover on an element.
fill(elem, value)Type into an element.
keyboard_press(key_comb)Press a key combination.
scroll(x, y)Scroll horizontally or vertically.
select_option(elem, options)Select one or multiple options.
goto(url)Navigate to a url.
go_back()Navigate to the previous page.
go_forward()Navigate to the next page.
new_tab()Open a new tab.
tab_close()Close the current tab.
tab_focus(index)Bring tab to front.
send_msg_to_user(text)Send a message to the user.
report_infeasible(reason)Notify user that instructions are infeasible.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.254, + 0.455, + 0.744, + 0.471 + ], + "angle": 0, + "content": "Table 7: Initial action space for agents throughout our experiments." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.503, + 0.497, + 0.519 + ], + "angle": 0, + "content": "A.2 Prompts for LLM-Based Components" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.536, + 0.432, + 0.552 + ], + "angle": 0, + "content": "Prompt for Episode Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.565, + 0.803, + 0.649 + ], + "angle": 0, + "content": "You are an expert in evaluating the performance of a web navigation agent. The agent is designed to help a human user navigate a website to complete a task. Given the user's intent, the agent's action history, the final state of the webpage, and the agent's response to the user, your goal is to decide whether the agent's execution is successful or not. Please be careful of each detail and strict about the evaluation process." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.648, + 0.804, + 0.857 + ], + "angle": 0, + "content": "There are three types of tasks: 1. Information seeking: The user wants to obtain certain information from the webpage, such as the information of a product, reviews, map info, comparison of map routes, etc. The bot's response must contain the information the user wants, or explicitly state that the information is not available. Otherwise, e.g. the bot encounters an exception and respond with the error content, the task is considered a failure. Besides, be careful about the sufficiency of the agent's actions. For example, when asked to list the top-searched items in a shop, the agent should order the items by the number of searches, and then return the top items. If the ordering action is missing, the task is likely to fail. 2. Site navigation: The user wants to navigate to a specific page. 
Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response. 3. Content modification: The user wants to modify the content of a webpage or configuration. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.856, + 0.802, + 0.884 + ], + "angle": 0, + "content": "*IMPORTANT* Please be strict about the evaluation process. Format your response into two lines as shown below:" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.884, + 0.572, + 0.899 + ], + "angle": 0, + "content": "Thoughts: " + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.898, + 0.413, + 0.911 + ], + "angle": 0, + "content": "Status: \"success\" or \"failure\"" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.106, + 0.421, + 0.122 + ], + "angle": 0, + "content": "Prompt for Episode Cleaning" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.134, + 0.8, + 0.177 + ], + "angle": 0, + "content": "You are a helpful assistant in summarizing web browsing actions. Your task is to summarize the main action taken and the corresponding state change, from the input thought process and environmental description." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.177, + 0.802, + 0.315 + ], + "angle": 0, + "content": "Example input: \"Let me think about this step by step: 1) I've clicked the Columns button and based on the accessibility tree, I can now see a list of available columns that can be shown/hidden. 
2) Looking through the available columns in the accessibility tree, I don't see any direct column for \"Number of Orders\" or similar metric. 3) Since I don't see the information I need in the available columns, this suggests that: - Either this information is available through a different path - Or we need to access the Sales/Orders section to correlate this information 4) The most logical path would be to: - Go to the Sales section where we can see all orders - And then analyze which customers have the most orders 5) From the accessibility tree, I can see the \"SALES\" link with bid \"183\" in the main navigation." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.315, + 0.798, + 0.342 + ], + "angle": 0, + "content": "Let me click on the SALES link: This should open the Sales menu where we can access the Orders section to analyze customer order histories.\"" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.342, + 0.8, + 0.37 + ], + "angle": 0, + "content": "Example output: \"Clicked the SALES link to access the Orders section for customer order histories.\"" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.392, + 0.402, + 0.406 + ], + "angle": 0, + "content": "Prompt for Skill Induction" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.42, + 0.8, + 0.463 + ], + "angle": 0, + "content": "You are a proficient software engineer. Your task is to (1) summarize reusable functions as APIs from the provided action trajectories, and (2) rewrite the trajectories using the reusable functions you generated in (1)." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.463, + 0.801, + 0.559 + ], + "angle": 0, + "content": "For (1), from the provided examples about the same task, you job is to generate Python functions that can be reused to solve (part of) these tasks. 
The functions should have mediocre complexity: (i) containing at least three actions and not too simple (e.g., a single line of code), (ii) not too complex (e.g., more than 10 lines of code), and should be general enough to be applied to other similar tasks. The arguments to these functions should be common variables (such as strings and lists), avoid using complex inputs such as another function." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.559, + 0.802, + 0.613 + ], + "angle": 0, + "content": "The actions you can use are: 'click', 'fill', 'hover', 'keyboard_press', 'scroll'; 'tabfocus', 'new_tab', 'tab_close', 'go_back', 'go_forward'; 'send msg to user', 'report_infeasible', 'select_option'. Do not use other undefined actions. Do not include any try-except blocks in the functions." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.614, + 0.801, + 0.698 + ], + "angle": 0, + "content": "Please include 'Args', 'Returns', and 'Examples' in the function documentation. For (2), write the instruction and rewritten code of each example. Do not include the answer response or example-specific information in the rewritten code. Pay attention to whether all link IDs are available before specifying them in the generated functions. If you use 'send msg_to_user', make sure the message is decided within the function, instead of provided as an argument." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.698, + 0.801, + 0.74 + ], + "angle": 0, + "content": "Make sure each function contains no less than 2 steps, and no more than 5 steps; to keep the functions simple and task-oriented. You can generate zero, one, or multiple functions depending on the provided examples." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.774, + 0.436, + 0.792 + ], + "angle": 0, + "content": "B Skill Induction: Analysis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.807, + 0.825, + 0.837 + ], + "angle": 0, + "content": "We provide more details about the skill induction process, in skill curation and reuse frequency (§B.1) and representative skill case studies (§B.2)" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.855, + 0.454, + 0.872 + ], + "angle": 0, + "content": "B.1 Skill Induction and Reusability" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.825, + 0.926 + ], + "angle": 0, + "content": "To provide more insights on how agents curate and reuse programmatic skills, for the main experiments on WebArena, we calculate the number of examples that (i) attempt to induce a new skill, (ii) successfully induce a new skill, and (iii) reuse a previously induced skill." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.517, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.149 + ], + "angle": 0, + "content": "As shown in Table 8, the agent reuses at least 1 skill for \\(42.5\\%\\) of the cases, which is pretty frequent across websites. Moreover, these skills are created using \\(7.6\\%\\) of the tasks, demonstrating the high scalability of our skill learning approach." + }, + { + "type": "table", + "bbox": [ + 0.207, + 0.153, + 0.789, + 0.257 + ], + "angle": 0, + "content": "
DomainAttempted InductionNumber of Examples
Successful InductionReuse ExamplesTotal
shopping21896180
admin3815108156
reddit241114105
map131080109
gitlab251110175
" + }, + { + "type": "text", + "bbox": [ + 0.202, + 0.26, + 0.793, + 0.276 + ], + "angle": 0, + "content": "Table 8: Analyzing the number of examples that attempt, induce, and reuse skills." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.3, + 0.376, + 0.316 + ], + "angle": 0, + "content": "B.2 Representative Skills" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.326, + 0.825, + 0.357 + ], + "angle": 0, + "content": "We demonstrate two representative types of skills, which (i) chain frequently-used-together actions, and (ii) involve advanced programming primitives." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.37, + 0.828, + 0.428 + ], + "angle": 0, + "content": "Chaining Primitive Actions The most common type of skills chains multiple frequently-used-together primitive actions (e.g., click, fill) in a certain order, to reflect a particular common usage, such as the search_product tool illustrated below. This type of skill provides an easy portal for frequent workflows." + }, + { + "type": "code", + "bbox": [ + 0.164, + 0.432, + 0.459, + 0.513 + ], + "angle": 0, + "content": "def search_product(name: str):\n click(\"Search textbox...\")\n if has_pop_window():\n click(\"Close\")\n fill(\"Search textbox\", name)\n click(\"Search button\")" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.827, + 0.569 + ], + "angle": 0, + "content": "Advanced Programming Primitives Besides a sequential chaining of actions, some skills contain more advanced logics, such as if-else or for/while loops, as the two examples below." 
+ }, + { + "type": "code", + "bbox": [ + 0.156, + 0.572, + 0.793, + 0.83 + ], + "angle": 0, + "content": "def navigate_and_sort_category(category_id: str, subcategory_id: str | None = None):\n '''\"Navigate to a product category and sort items by price.\nArgs:\n category_id: ID of the main category link\n subcategory_id: Optional ID of the subcategory link, if needed\nReturns:\n None\nExamples:\n navigate_and_sort_category('1241', '1873') # PS4 under Video Games\n navigate_and_sort_category('1245') # For main category only\n '''' click(category_id) # Click main category\nif subcategory_id:\n click(subcategory_id) # Click subcategory if provided\nselect_option(\"1553\", \"Price\") # Sort by price ascending" + }, + { + "type": "code", + "bbox": [ + 0.156, + 0.846, + 0.82, + 0.926 + ], + "angle": 0, + "content": "def browse_category_byjahicationmenu_id_sequence: list):\n \"\"Browse products by navigating through a sequence of menu IDs.\n This function allows navigation through a series of menu interactions.\n Args:\n menu_id_sequence(list): A list of menu IDs to interact\n sequentially, using hover actions followed by a click." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "code", + "bbox": [ + 0.156, + 0.105, + 0.831, + 0.251 + ], + "angle": 0, + "content": "Example usage: browse_category_byjahgnation['735','786','797']#Navigates Home&Kitchen->Storage&Organization->Racks,Shelves& Drawers \n\"''\" foridx,menu_idin enumerate(menu_id_sequence[-1]): hover/menu_id) click菜单_id_sequence[-1])#Click the final id to land in the predefined category" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.28, + 0.525, + 0.298 + ], + "angle": 0, + "content": "C Scaled-Up and Cross-Website Tests" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.313, + 0.827, + 0.357 + ], + "angle": 0, + "content": "We provide the full list of tasks used in scaled-up (S4) and cross-website (S5) analyses in S.C.1 and S.C.2, respectively. In S.C.3, we further perform significance testing to validate the findings from Table 4 and Table 6." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.374, + 0.342, + 0.391 + ], + "angle": 0, + "content": "C.1 Scaled-Up Tasks" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.401, + 0.825, + 0.432 + ], + "angle": 0, + "content": "Table 9, Table 10, Table 11, Table 12 and Table 13 shows example scaled-up tasks studied on the shopping, admin, social forum, software development, and map websites." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.445, + 0.825, + 0.761 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
Add a wireless headphone, a water bottle, a notebook, a ground coffee, and a mug to my shopping cart.Add a wireless headphone to cart; Add a water bottle to cart; Add a notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Add the most expensive item from the video games category, the cheapest item from the Office Products category, and the most relevant coffee mug to my shopping cart.Add the most expensive item from the video games category to cart; Add the cheapest item from the Office Products category to cart; the most relevant coffee mug to my shopping cart.3
Add the cheapest wireless headphone, a water bottle, the most expensive notebook, a ground coffee, and a mug to my shopping cart.Add the cheapest wireless headphone to cart; Add a water bottle to cart; Add the most expensive notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Show me the ordered items for each cancelled order from Feb to May in 2023.Show me the 5/17/23 order; Show me the 2/24/23 order; Show me the 2/11/23 order.3
Iterative update my billing address to 231 Willow Way, Suite 100, Chicago, IL, 60601. Then, update my shipping address to 987 Sycamore Circle, Philadelphia, PA, 19102.Successfully update my billing address; Successfully update my shipping address.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.245, + 0.769, + 0.752, + 0.786 + ], + "angle": 0, + "content": "Table 9: Exemplar scaled-up browsing tasks on the shopping website." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.813, + 0.371, + 0.827 + ], + "angle": 0, + "content": "C.2 Cross-Website Tasks" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.839, + 0.828, + 0.884 + ], + "angle": 0, + "content": "Table 14, Table 15, and Table 16 lists example tasks to test agent generalization abilities on shopping (OneStopMarket to Target), social forum (Postmill to Reddit), and software development (GitLab to GitHub) domains." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.901, + 0.368, + 0.917 + ], + "angle": 0, + "content": "C.3 Significance Testing" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.101, + 0.825, + 0.543 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
Tell me the number of reviews that our store received by far that mention terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.Return the correct number for terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.5
I need to contact a list of customers. Find the customer name and email with phone number 2058812302, 2137418080, 2065555555, 8015551212, and 555-229-3326.Return the correct name and email information for customers with each of the five phone numbers.5
I will need to update our webpage to create a more energetic vibe. Change the page title of '404 Not Found' to 'Bruh bro you clicked the wrong page', the page title of 'Enable Cookies' to 'Cookie monster coming to your place', the page title of 'Home Page' page to 'This is the home page!!', the page with title 'Privacy Policy' to 'No privacy policy is needed is this dystopian world', and lastly, change the page 'About Us' to 'Secret'.Change the page title correctly for each of the five pages.5
I need to generate a bunch of report to show to the store manager in an hour. Could you help me generate a sales order report for the last month, over the last 45 days, and for Q1? I'll also need a refund report for last year, and a tax report for this year. Today is 3/15/2023.Generate a sales report for 2/1/2023-2/29/2023; generate a sales report for 1/29/2023-3/15/2023; generate a sales report for 1/1/2023-3/15/2023; Generate a refund report for 1/1/2022-12/31/2022; Generate a tax report for 1/1/2023-3/15/2023.5
Tell me the SKU of products that have 10 units, 3 units, and 0 units left. Also, give me the product names that have 2-3 units left.Return the correct SKU for the first three questions; return the correct product names for the last question.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.216, + 0.551, + 0.781, + 0.568 + ], + "angle": 0, + "content": "Table 10: Exemplar scaled-up browsing tasks on the shopping admin website." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.593, + 0.422, + 0.718 + ], + "angle": 0, + "content": "Scaled-Up Tasks We conduct t-tests between (i) ASI and AWM, (ii) ASI and VANILLA agent. From the results in Table 17, we find the advantage of ASI in success rate and efficiency improvements, when comparing to both AWM and VANILLA agents, are statistically significant, as indicated by all" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.718, + 0.625, + 0.733 + ], + "angle": 0, + "content": "t-statistics with absolute values over 2 and p-value below 0.05." + }, + { + "type": "table", + "bbox": [ + 0.434, + 0.594, + 0.821, + 0.659 + ], + "angle": 0, + "content": "
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-2.36010.02262.76640.0068
ASI vs. VANILLA-4.09220.00022.19830.0296
" + }, + { + "type": "table_caption", + "bbox": [ + 0.431, + 0.666, + 0.828, + 0.697 + ], + "angle": 0, + "content": "Table 17: Results of significance testing on ASI's advantages for scaled-up web tasks." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.747, + 0.422, + 0.873 + ], + "angle": 0, + "content": "Cross-Web Tasks We conduct similar significance testing on cross-web tasks and report the results in Table 18. While ASI does not significantly outperform AWM in success rate, given the presumably greater flexibility of textual workflows, ASI still exhibits significant advantages on" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.873, + 0.827, + 0.916 + ], + "angle": 0, + "content": "the efficiency side. Furthermore, comparing ASI to static VANILLA agents, ASI achieves significant improvements in both success rates and efficiency (i.e., reduced number of steps), suggested by \\( |t| > 2 \\) and \\( p < 0.05 \\)." + }, + { + "type": "table", + "bbox": [ + 0.434, + 0.757, + 0.821, + 0.822 + ], + "angle": 0, + "content": "
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-1.39800.16732.12380.0378
ASI vs. VANILLA-3.59840.00072.57920.0125
" + }, + { + "type": "table_caption", + "bbox": [ + 0.431, + 0.829, + 0.827, + 0.859 + ], + "angle": 0, + "content": "Table 18: Results of significance testing on ASI's advantages for cross-web tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.121, + 0.825, + 0.463 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
I'm planning to organize multiple meetings in the next few months. Help me post notices on virtual meetings for the little women on Apr 10th, for Harry Potter in May 15th, and for Jane Eyre in Jan 30th, in the most suitable forums in PostMill.Post Apr 10th meetup; Post about May 15th meetup; Post Jan 30th meetup. All in book-related forums.3
Could you tell me all forums with names related to computer science?must include: deeplearning (1 pt), Machine-Learning (1 pt); optionally (get 1 score if include any): science, askscience, technology.3
Find the most relevant posts about jerseycity, newjersey, and nyc; and tell me how different they are.Correctly find post about jerseycity; Correctly find post about newjersey; Correctly find post about nyc; Answer how different they are.4
Thumbs down the top-2 posts in jerseycity, newjersey, and nyc forums, I don't like them.Thumbs down the top-2 posts in the jerseycity forum; Thumbs down the top-2 posts in the newjersey forum; Thumbs down the top-2 posts in the nyc forum.3
Reply "Thank you! This is super helpful!" to three posts about long-distance relationship advice.Reply to three posts with the correct message. Need to be relevant to long-distance relationship advice.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.283, + 0.471, + 0.715, + 0.487 + ], + "angle": 0, + "content": "Table 11: Exemplar scaled-up tasks on the Postmill website." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.535, + 0.825, + 0.877 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
Display the list of issues in the a11yproject/a11yproject.com repository that have labels related to 'help needed', and assign the most recent one to the top contributor of this repository.Display the help-wanted issues; find the top contributor; assign him to the most recent help-needed issue.3
Set up a new, empty repository with the name agent_skill_induction, and create a MIT license file. Then, invite Abishek and Vinta as collaborators.Create a new repository with given name; Create a MIT license inside; Invite both collaborators.3
Start a private project web_agent.android xl with Android template and add primer, convexegg, abishek as members.Create the repository private and with Android template; Invite all three people as members.2
Add the following users to repo a11y-webring.club as developer: [abisubramanya27, lahwaacz], and [yjlou, a11yproject] as maintainer.Add abisubramanya27 and lahwaacz as developers; Add yjlou and a11yproject as maintainers.2
Add the following users [abisubramanya27, lahwaacz, yjlou, a11yproject] to repo a11y-webring.club, make sure to assign them different roles.Add abisubramanya27 with role 1; Add lahwaacz with role 2; Add yjlou with role 3; Add a11yproject as role 4. Role 1-4 need to be all different.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.287, + 0.885, + 0.711, + 0.902 + ], + "angle": 0, + "content": "Table 12: Exemplar scaled-up tasks on the GitLab website." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.164, + 0.825, + 0.516 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
Search for the closest restaurants, cafes, parking, and banks to Carnegie Mellon University on the map.Return the closest restaurants; Return the closest cafes; Return the closest parking; Return the closest banks.4
I will need to go to multiple places from Carnegie Mellon University today, including the Univ of Pittsburgh, UPMC shady-side, the Schenley park, and Squirrel Hill. Could you should me the driving route to all those places?Show me driving route from CMU to UPitt; Show me driving route from CMU to UPMC; Show me driving route from CMU to Schenley Park; Show me driving route from CMU to Squirrel Hill.4
Show me the route of driving from CMU to University of Pittsburgh, then walking to the Schenley Park; next, bike to UPMC shadyside, and walk to Squirrel Hill after that.Show me CMU → Upitt route by car; Show me Upitt → Schenley Park route by foot; Show me Schenley Park → UPMC route by bike; Show me UPMC → Squirrel Hill route by foot.4
Check if the Univ of Pittsburgh, UPMC shadyside, schenley park, and squirrel hill can be reached within one hour by walking, if departing from Carnegie Mellon University.Return yes to route 1, route 2, route 3, and route 4.4
Tell me the coordinates of Univ of Pittsburgh, UPMC shadyside, schenley park, squirrel hill, and CMU in DD format.Return the coordinates of each of the four places.4
" + }, + { + "type": "table_caption", + "bbox": [ + 0.295, + 0.524, + 0.7, + 0.541 + ], + "angle": 0, + "content": "Table 13: Exemplar scaled-up tasks on the Map website." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.672, + 0.825, + 0.836 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
Show me the options for Canon photo printer?Return the correct search result.1
I have a lot of Nintendo Switch game cards now, help me find the best storage option to fit all 11 cards.Return one valid product.1
What is the price range for beauty products?Return the correct price range.1
Show me products under $25 for woman shoesDisplay correct products.1
Show the least expensive shoe storage with a minimum storage capacity of 12 pairs.Display correct products.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.291, + 0.845, + 0.704, + 0.861 + ], + "angle": 0, + "content": "Table 14: Exemplar shopping tasks on the target website." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.159, + 0.825, + 0.413 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
Tell me the names of books recommended in the latest five posts in the books forumFind the r/books forum; Find the most recent 5 posts; Give the correct answer.3
Tell me the titles of the 5 most recent posts about little women in the books forumFind the r/books forum; Find little women related posts; Sort the posts by newest.3
What are the recommended products for noise-canceling headphones within a bud-get of $200 in r/headphonesFind the r/headphones forum; Correctly search with noise-canceling, under $200 re-requirements; Return a valid headphone rec-ommendation.3
Find 3 pieces of advices about deal with long-distance relationships in a subreddit for relations.Navigate to a forum about relations; find at least 3 pieces of advice from relevant posts.2
Find if there are any jeep wrangler meetups. If so, when and where?Search in jeep wrangler related forums; Re-turn a valid answer based on the search result.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.281, + 0.421, + 0.719, + 0.436 + ], + "angle": 0, + "content": "Table 15: Exemplar social forum tasks on the reddit website." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.561, + 0.825, + 0.84 + ], + "angle": 0, + "content": "
InstructionCheckpointsScore
Tell me the full address of all international airports that are within a driving distance of 30 miles to Carnegie Mellon UniversityReturn Pittsburgh International Airport.1
I will arrive Pittsburgh Airport soon. Provide the name of a Hilton hotel in the vicinity, if available. Then, tell me the the walking distance to the nearest supermarket own by a local company from the hotel.Show me the hotels; Find a nearby supermarket; Show me the walking route from the hotel to the supermarket.3
Show me the walking route from nearby hotels to CMU, Pittsburgh that take at most 5 minutes?Find a hotel that meets the walking time requirement; Show me the walking route.2
I am at CMU Pittsburgh, how long it takes to the nearest USPS postal office with different transportation methods?Return travel time by car, by foot, by bus, and by bike.4
Tell me the coordinates of Carnegie Mellon Cafe in DD format.Return the correct coordinates.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.254, + 0.848, + 0.744, + 0.864 + ], + "angle": 0, + "content": "Table 16: Exemplar social forum tasks on the Google Maps website." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "21" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_origin.pdf b/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9877ee3294038c65284a6d1624656218f43a38bf --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/04e4beb1-0999-4f5d-99d2-8bd91900a008_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8fbadd2b320fab25fa7302d3ba18df2a216ebf0819d83ec3f457f7117e103d0a +size 3485421 diff --git a/data/2025/2504_06xxx/2504.06821/full.md b/data/2025/2504_06xxx/2504.06821/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7e9007c532092b9d4b762ae6ba97aa17945abf90 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/full.md @@ -0,0 +1,442 @@ +# Inducing Programmatic Skills for Agentic Tasks + +Zora Zhiruo Wang Apurva Gandhi Graham Neubig Daniel Fried + +Carnegie Mellon University + +{zhiruow,apurvag,gneubig,dfried}@cs.cmu.edu + +# Abstract + +To succeed in common digital tasks such as web navigation, agents must carry out a variety of specialized tasks such as searching for products or planning a travel route. To tackle these tasks, agents can bootstrap themselves by learning task-specific skills online through interaction with the web environment. In this work, we demonstrate that programs are an effective representation for skills. We propose agent skill induction (ASI), which allows agents to adapt themselves by inducing, verifying, and utilizing program-based skills on the fly. 
We start with an evaluation on the WebArena agent benchmark and show that ASI outperforms the static baseline agent and its text-skill counterpart by $23.5\%$ and $11.3\%$ in success rate, mainly thanks to the programmatic verification guarantee during the induction phase. ASI also improves efficiency by reducing $10.7 - 15.3\%$ of the steps over baselines, by composing primitive actions (e.g., click) into higher-level skills (e.g., search_product). We then highlight the efficacy of ASI in remaining efficient and accurate under scaled-up web activities. Finally, we examine the generalizability of induced skills when transferring between websites, and find that ASI can effectively reuse common skills, while also updating incompatible skills to versatile website changes. $^{1}$ + +# 1 Introduction + +To achieve success in common digital tasks such as web navigation, it is essential for agents to be able to perform a variety of specialized tasks such as searching for products on a shopping website (Yao et al., 2022; Deng et al., 2024) or finding a driving route on the map (Zhou et al., 2024a; Xie et al., 2024a). While one source for agents to learn such tasks is demonstrations annotated by humans (Deng et al., 2024) or synthesized with large language models (LMs) on websites of interest (Murty et al., 2024b;a), this can be a challenging offline learning procedure given the broad range of website domains and functionalities, especially for the collected demonstrations to match or cover the distribution of tasks queried at inference time (Zhou et al., 2024b); not to mention the limitations in resources to collect abundant high-quality data at ease (Pan et al., 2024). + +Instead of learning from demonstrations offline, an alternative way is to learn these tasks directly online from test queries to prevent potential distribution mismatch between demonstration and downstream tasks (Levine et al., 2020). 
Some works propose to have agents induce causal abstractions (Majumder et al., 2024), single-state guidelines (Fu et al., 2024), or multi-step procedural workflows (Sarch et al., 2024; Wang et al., 2024b) as a form of intermediate knowledge to augment agent memory via non-parametric approaches (Brown et al., 2020). Nonetheless, most existing approaches represent this knowledge in text, offering limited quality and verification guarantees. In this work, we propose that executable programs are effective representations for intermediate skill acquisition, given their verifiability and composability advantages (Setlur et al., 2025).

We present ASI, namely agent skill induction (§2), that induces and applies programmatic skills along the process of solving user web navigation queries. More concretely, given a natural language (NL) query, the agent first generates an action trajectory attempting to solve the task using built-in, primitive actions such as click and scroll. The agent then induces higher-level skills (e.g., search_product(name)) that wrap primitive actions or prior skills as executable programs, accompanied with corresponding test trajectories to verify their quality. Verified skills are then incorporated into the agent action space and can be directly called to solve future tasks with similar procedures, as depicted in Figure 1 (bottom).

We first evaluate ASI on the WebArena benchmark (Zhou et al., 2024a) (§3) and demonstrate that our online, adaptive ASI surpasses its static agent baseline without adaptive components by $23.5\%$ in success rate. To validate the advantage of using programmatic representations for skills, we further compare to an adaptive agent, AWM (Wang et al., 2024b), that represents skills in memory as non-executable texts (Figure 1 top); we find ASI scores $11.3\%$ higher success rate by employing verifiable, programmatic skills (Figure 1 bottom).
Beyond the correctness aspect, the task-solving procedures by ASI-supported agents are $10.7 - 15.3\%$ more efficient than the baseline approaches, mainly because of the action space abstraction and composition enabled by the programmatic skill representation.

![](images/e184163a935c302d2edc322fe07a755bc00b82f9292ce321df60b7e979ab1931.jpg)
Figure 1: Online adaptive agent that induces and reuses programmatic skills as actions (bottom), as opposed to adding textual skills in memory (top).

We further stress test ASI on scaled-up scenarios (§4) that require substantially longer-horizon trajectories to complete the task. Across various domains such as shopping and social forums, we find the efficiency advantage offered by ASI is more pronounced, reducing action trajectories by 9.5 and 5.6 average steps compared to static and text-form agents. Facilitated by this improved procedural efficiency and planning, we find that ASI agent adheres to the optimal trajectory better and achieves tasks $20.7 - 38.9\%$ more correctly.

Finally, we study agent behaviors on generalizing induced skills to other websites (§5), particularly from sandboxed, simulated websites to real-world websites of similar domains. While ASI agents effectively transfer common skills (e.g., search_product) to new websites, some skills may be incompatible with alternative website designs hence less useful. Nonetheless, ASI can quickly refine its prior skills or create new skills on new websites, indicating it allows agents to adapt online while maintaining verifiability via programs.

In short, ASI enhances web agent success and efficiency by inducing and applying verifiable programmatic skills, in general and longer-horizon tasks, even across varied websites.

# 2 Agent Skill Induction

In this section, we first lay out the web agent problem setup (§2.1) and introduce online, self-adaptive agents (§2.2).
We then describe the core component of ASI—programmatic skill induction and verification (§2.3). + +# 2.1 Problem Statement: Online Adaptive Agent + +For the scope of this work, we focus on language model (LM) based agents, where each agent policy consists of an LM backbone $\mathcal{L}$ , a memory $\mathcal{M}$ , and a skill library $\mathcal{A}$ , as illustrated in Figure 1 top and bottom. In the implementation, the memory $\mathcal{M}$ and the skill library $\mathcal{A}$ are provided as input context to the LM backbone. We denote the agent policy as $\pi_{\mathcal{L}}(\cdot|\mathcal{M},\mathcal{A})$ and $\pi_{\mathcal{L}}$ for short. We focus on the web browser environment defined by a transition function $\mathcal{T}(s'|s,a)$ that models the change in the webpage after an action. + +We focus on an online adaptation scenario where we have access to a sequence of NL queries $Q = \{q_{1}, q_{2}, \dots, q_{N}\}$ specifying the tasks, and no other information such as demonstration + +trajectories or ground-truth rewards are available (Wang et al., 2024a,b). For each task specified by a natural language (NL) query $q$ , the agent generates a trajectory of actions $\tau = (s_0, a_0, s_1, a_1, \dots, s_{H-1}, a_{H-1}, s_H)$ for a finite number of $H$ steps. At each time step $h$ in the horizon, the agent receives observation $o_h$ from the current state $s_h$ , and generates an action $a_h \in \mathcal{A}$ based on the observations and actions so far, via $\pi_{\mathcal{L}}(o_{0:h}, a_{0:h-1}; \mathcal{M}, \mathcal{A}) \to a_h$ . The generated action will be executed on the environment and incurs a state change $\mathcal{T}(s_h, a_h) \to s_{h+1}$ . This observe-act loop continues for $H$ steps until the task reaches a task-terminating condition, such as the agent generating a termination action (e.g., send msg_to_user) or the horizon reaches a pre-determined maximum number of steps $h = H_{max}$ . 
We denote each pair of query and trajectory $(q, \tau) := e$ as an episode $e$ . Agents can update the content in $\mathcal{M}$ and $\mathcal{A}$ and reuse them across episodes. + +# 2.2 Inducing Reusable Skills + +To realize online adaptive agents, one common approach is to induce skills from correct trajectories to update the agent (Wang et al., 2024b). But since ground-truth rewards are unavailable, an LLM-based evaluator $V_{\mathcal{L}}$ is often used to judge the correctness of episodes. Formally, from the total of $N$ episodes throughout the online process $\{e^1,\dots ,e^N\} \coloneqq \mathcal{E}$ , we employ an LM-based evaluator $V_{\mathcal{L}}(e)\to 0 / 1$ to filter out the episodes predicted as correct $\mathcal{E}_V = \{e_i\in \mathcal{E}|V_{\mathcal{L}}(e_i) = 1,i\in \{1,\dots ,N\} \}$ and perform skill induction only on $\mathcal{E}_V$ . + +Central to our adaptive agents is an induction component $I$ that enables the adaptivity of agents, which can be rule-based (Ellis et al., 2023; Grand et al., 2024) or instantiated by an LM $I(\cdot |LM)$ (Wang et al., 2024b); we follow the latter for its better performance and use $I$ to represent the module for simplicity. For online adaptive agents $\pi_{\mathcal{L}}$ , to induce skills, $I$ is instructed to take in one filtered episode $e$ and output one or more pieces of desired skills $D = \{d\}$ , denoted as $I(e)\to \mathcal{D}$ . Following AWM (Wang et al., 2024b), we update the agent in non-parametric ways that incorporate the induction outcome $I(e_t)\rightarrow d_t$ into the agent, instead of updating the parameters of the underlying LM backbone $\mathcal{L}$ for agent policy $\pi_{\mathcal{L}}$ . 

Unlike AWM which represents skills in free-form text representations and can only augment agent memory via $\mathcal{M}_t \cup \{d_t\} \to \mathcal{M}_{t+1}$ (Figure 1 top), we introduce ASI that represents skills as executable python programs, and directly integrate skills into the agent action space instead, via $\mathcal{A}_t \cup \{d_t\} \to \mathcal{A}_{t+1}$ (Figure 1 bottom).

# 2.3 Inducing and Verifying Programmatic Skills

To improve the induction quality, we propose a change in representation from free-form text to executable programs, which offers advantages in correctness and efficiency. For one, the program format enables ready verification on skill correctness by executing them; for another, skill programs abstract multiple lower-level actions into a higher-level function call, thus agents can solve tasks in fewer steps without tackling tricky low-level details.

Inducing Programmatic Skills We first clean the input episodes to ensure the induction quality. We remove all the steps that cause execution errors such as invalid argument format, to keep these invalid actions from distracting agent predictions. Furthermore, noticing the long and possibly redundant thought process generated by agents along with each action, we simplify each thought text paragraph into a short one-sentence description (e.g., "Clicked the directions button to access the route planning feature") using LM, effectively reducing the thought content from 87.9 to 13.4 tokens per step.

Given a clean input episode $e$ , we now prompt the induction module $I$ to produce one or more program functions to represent reusable skills $\mathcal{D} = \{d\}$ as executable programs. As exemplified in Figure 2, given the input episode on the left side, the induction module first produces two skills open_marketreviews() and searchreviews(search_box_id, search_button_id, search_term) in the form of callable program functions.

Skill Verification With the programmatic nature of ASI's skills, we can readily verify their correctness by executing them and checking if tasks can be solved successfully. While a naive way is to query the agent with the same NL query and allow it to use newly induced skill actions, we find agents may not always use new skills due to the large search space of

![](images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg)
Figure 2: Inducing programmatic skills and rewriting the trajectory from an episode.

possible action trajectories. To have agents more efficiently generate trajectories that test skills in a more targeted way, we curate a rewritten trajectory prefix $\tau_{D}$ to constrain the first few steps executed in the environment, by rewriting and truncating the input action trajectory $\tau$ , and subsequently asking the agent to complete the prefix to get a full, checkable trajectory $\tau_{f}$ . Concretely, we first take the original action trajectory in the input episode $\tau$ (consisting of primitive actions or previously learned skills), and ask the induction module $I$ to transform it to a skill-using trajectory (Figure 2 bottom right), by replacing sub-trajectories in $\tau$ with calls to the newly induced skill programs $\mathcal{D}$ , if possible. Zooming into the Figure 2 example, this procedure merges click('Marketing') $\rightarrow$ click('All Reviews') to an open_marketreviews() call; transforms fill(757, 'satisfied') $\rightarrow$ click('Search') to a call of the second skill searchreviews('satisfied') with the specified term 'satisfied'; and adopts the last send_msg_to_user('2') step directly. Note that we follow Wang et al.
(2024b) and induce skills according to each website, so some skills could be tailored to particular webpage contexts such as the 'Marketing' and 'All Reviews' link constants in open_marketreviews, while other skills apply to more versatile setups such as searching for different reviews in searchreviews.

Next, to avoid spurious successes in skill verification, we truncate the trajectory yielded above by removing any trailing primitive actions after the last call to a skill program. Taking Figure 2 as an example, in the original input trajectory, the last send_msg_to_user('2') already sends over the correct answer '2' to the user. However, if we directly adopt this last step into the skill-using trajectory $\tau_{D}$ , then executing it will always return the correct message to the user, regardless of whether the previous skill calls are valid. We thus remove such trailing actions to make sure verification attends to the induced skills we are testing.

After rewriting and truncation, we get this skill-using trajectory $\tau_{D}$ as the prefix to test skills. We now query the agent $\pi$ again with the same NL query $q$ and first execute $\tau_{D}$ on the environment. We then allow agents to continue generating up to $H_{max} - |\tau_{D}|$ actions to finish the task. In the Figure 2 example, to successfully solve query $q$ , we expect the agent to generate another step of send_msg_to_user(...) with the correct answer '2' in the message. We take the concatenation of the trajectory prefix $\tau_{D}$ and the later additionally produced new steps $\tau_{A}$ (e.g., [send_msg_to_user('2')]) as the full trajectory $\tau_{f}$ . We then decide whether to add the induced programs $\mathcal{D}$ into the agent skill library as $\mathcal{A}$ by examining $\tau_{f}$ .
+ +Specifically, we check $\tau_{f}$ from three dimensions: (1) Correctness: if executing $\tau_{f}$ successfully solves the task $q$ as judged by the neural model evaluator $V_{\mathcal{L}}$ ; (2) Skill Usage: if the trajectory contains at least one call to at least one new skill in $\mathcal{D}$ ; and (3) Skill Validity: if all skill-calling actions cause environment changes. If all three boxes are checked, we add the skills being called in the trajectory $\tau_{f}$ to the agent skill library $\mathcal{A}_{t} \cup \mathcal{D}_{\text{called}} \rightarrow \mathcal{A}_{t+1}$ . By adding $\mathcal{D}_{\text{called}}$ , the agent can now generate actions that call these skill programs to solve subsequent tasks. + +# 3 General Web Navigation Performance + +# 3.1 Experiment Setup + +Benchmark and Evaluation To evaluate ASI on general web navigation scenarios, we adopt the WebArena benchmark (Zhou et al., 2024a) that contains 812 test examples covering + +
ModelMethod# StepsSRShopAdminRedditGitLabMapsMulti
GPTVanilla-12.313.910.46.615.015.68.3
AWM5.935.532.129.154.735.042.218.8
ClaudeVanilla5.632.732.636.836.826.138.520.8
AWM5.936.334.839.051.928.939.418.8
ASI (ours)5.040.440.144.054.732.243.120.8
+ +Table 1: WebArena success rate by adaptive agents with programmatic skills, in comparison to a static vanilla agent baseline, and a text-skill learning adaptive agent. + +five major web activity domains: e-commerce, social forum, software development, content management, and travel. Each example in WebArena has an NL query $q$ for the task, and a program-based evaluator that provides a binary $0/1$ score for any given trajectory $\tau$ to judge if it successfully solves the task $q$ . This program-based evaluator enables relatively rigorous evaluation based on the functional correctness of the action trajectory. We report the average score across all WebArena examples, if not specified otherwise. + +Backbone LM and Agent Architecture We use the top-performing claude-3.5-sonnet model as the LM backbone for all components, including the agent policy $\pi$ , the neural evaluator $V$ , and the skill induction modules $I$ . For experimentation, we use the BrowserGym (Chezelles et al., 2024) framework, which takes the webpage accessibility tree as observation, and instantiates the skill library $\mathcal{A}$ with the WebArena default action space listed in $\S A$ . + +Baselines We take the vanilla Claude model with the BrowserGym framework (Drouin et al., 2024) as the non-adaptive agent baseline. Additionally, we compare ASI to AWM (Wang et al., 2024b), the current top-performing online adaptive web agent method. Because AWM was originally developed with the gpt-4o model, for a fairer comparison, we also experiment with AWM with claude-3.5-sonnet model as its LM backbone and also apply the episode cleaning procedure to enhance induction quality. We compare the two baseline methods with our ASI approach. We provide the complete prompts for each agent component: task-solving, episode evaluation, episode cleaning, and skill induction, in SA. 
+ +# 3.2 Results and Analysis + +In Table 1, compared to the vanilla static-agent baseline, adaptive agents (AWM and ASI) generally achieve $11.0 - 23.5\%$ higher success rates overall. Among adaptive agents, our ASI with programmatic skills, achieves another $11.3\%$ success rate gain across websites, compared to its AWM counterpart that induces and uses textual skills. Meanwhile, ASI offers additional efficiency benefits by reducing the number of steps in solutions by $15.3\%$ and $10.6\%$ than vanilla and AWM agents, as one skill-call action can often execute multiple steps written in primitive actions used by vanilla and AWM agents. These advantages in correctness and efficiency are exhibited prominently across different websites and tasks, as shown by the website breakdown on Table 1 (right). Refer to $\S B$ for more analysis. + +# 3.3 Why are Programmatic Skills Better? + +To more concretely answer why programmatic skills are more effective than textual skills, we take a closer look on the two main differences between AWM and ASI: [1] whether the induction outcome is verified via execution, and [2] whether the induced skills are provided in memory for reference purpose only, or in the action space that allows execution. + +Better Induction Quality We take the shopping website as a representative, and analyze the textual and program skills induced by AWM and ASI agents. We group textual and program skills by their functionality and show one representative example in Table 2. 
Compared to the clear functional boundary and highly-reusable granularity of the search_product skill, we find that the textual skills often have (1) more redundant steps, (2) example-specific context: e.g., the last text skill aims to find 'game accessories' while the steps generally apply to any product, and (3) fuzzier boundaries between separable tasks, e.g., the first skill mixes product-search and add-to-wishlist procedures, thus may not offer optimal guidance when asked to, e.g., search product and add it to cart instead. + +
Programmatic SkillsTextual Skills
def search_product_search_box_id: str, query: str): + ""Search for a product using the search box. + Args: + search_box_id: ID of the search input field + query: Search query string to enter + Returns: + None +Examples: + search_product('595', 'sony bluetooth headphones') + "" +click_search_box_id) +fill_search_box_id, query) +keyboard_press('Enter'))Task: Search for a product and add it to wish list +Action Trajectory: + __fill(621, {product_name})__ # Enter the product +name in the search box + __click(478)__ # Click the search button to execute the +search + __click({product_link})__ # Click the product to check +more details + __click(1769)__ # Click the "Add to Wish List" link
Task: Search for a product's price range in the store +Action Trajectory: + __fill(565, {product_name})__ # Enter the product +name in the search box + __click(570)__ # Click the search button to execute the +search + __noop(1000)__ # Wait for search results to load + __send msg_to_user({price_range_info})__ # +Analyze and report the price range findings from the +search results
Task: Search for gaming accessories within a date range +Action Trajectory: + __click(1274)__ # Navigate to the Video Games category + __fill(473, {search_terms})__ # Enter search terms +including product name and year + __click(478)__ # Execute the search
+ +Verified Induction Improves End Success Rate From qualitative examination of the induction outcomes, we find roughly similar numbers of episodes evaluated as correct and used for induction (70 and 58 examples for AWM and ASI), ASI produced programs pass verification for only $15.6\%$ of the turns, whereas AWM adds new skills for $31.4\%$ of the time (replace or add none otherwise). While skill usage (in memory or as action, [2]) is designated for AWM and ASI, we hypothesize that verification [1] affects induction quality and thus end success. We thus experiment with another setting that induces programs (such that verification is enabled), and only use the induced skills in memory, to study the importance of induction quality. As shown in Table 3, inducing skills with execution-based verification (i.e., (unverified, text) $\rightarrow$ (verified, program)), while always present skills in memory, improves end success rate by 4.2 points, indicating the importance of higher-quality induction via verification. Yet it is still 3.7 points lower than ASI, suggesting the incompatibility of program format to agent memory. Indeed, we observe many cases where the agent tries to call the skill programs but unsuccessfully, since they are not supported in the action space. + +Textual Representations Suit Memory Better To prevent the agent from trying to call these plausible programs, we ablate another setting that transforms program skills to textual format (as Table 2 right) and provide them in agent memory, dubbed (verified, text). This format transformation effectively improves the overall success rate by another 2.6 points, getting a little closer to ASI. Given the different downstream usage, + +Table 2: Example textual and program skills induced on the shopping website. + +
MethodSR
Add to Memoryunverified, text32.6
verified, program36.4
verified, text39.0
Add as Actionsverified, program40.1
+ +Table 3: Ablation study on induction verification and format on the shopping website. + +i.e., memory or actuation, textual and program formats may suit individual scenarios better. + +Beyond basic web navigation tasks, in the next two sections, we examine agents in two other important scenarios, scaled-up activities (§4) and cross-website generalization (§5). + +# 4 Scaled-Up Browsing Activities + +The WebArena benchmark mainly features isolated, single-task scenarios, such as adding a single product to the shopping cart. However, in real-world practices, people need to do a series of such tasks together, such as adding multiple related products (e.g., coffee and + +mug) to the cart before finally checking out. This browsing request can lead to extremely long-horizon tasks, sometimes with repetitive intermediate procedures. We identify this to be a scenario to further demonstrate the efficacy of program skills, as opposed to textual skills, as programs lend themselves naturally to repeated invocation and composition. + +Therefore, we curate several case scenarios where the user asks for action-dense instructions, such as the tasks listed in Figure 3. Because the tasks are long-horizon and involve multiple sub-tasks, we follow Xu et al. (2024) and set up intermediate checkpoints to better track the intermediate progress of agents. Refer to SC.1 to see the full list of tasks and their evaluation checkpoints. We measure the success rate of each example by the percentage of checkpoints achieved by the agent. We report the average success rate of all examples, as well as the average number of steps taken to solve the tasks, in Table 4. + +
MethodShoppingAdminRedditGitLabMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
VANILLA41.723.558.020.833.323.033.340.040.015.2
AWM68.321.574.018.240.016.850.033.865.012.6
ASI (ours)100.016.391.014.255.012.855.025.4100.06.2
+ +Table 4: Performance of vanilla, AWM, and ASI agents in scaled-up browsing scenarios. We perform statistical testing between ASI and each baseline and verify all improvements are statistically significant with t-statistics $|t| > 2$ and $p < 0.05$ ; see SC.3 for more details. + +ASI Features Improved Efficiency Across all websites, ASI-produced trajectories have 6.6-14.6 and $4.0 - 8.4\%$ fewer steps, compared to vanilla and AWM baselines, respectively. As the task horizon continues to grow when involving more intermediate checkpoints, this margin between ASI and baselines will predictably be more prominent. + +Subsequent Benefits in Success Rate ASI also achieves higher success rates with more efficient trajectories, outperforming vanilla and AWM baselines by $38.9\%$ and $20.7\%$ on average. From manual analysis, we find this improvement comes from easier, better agent planning when using higher-level skills, without the need to tackle more complex procedures if only low-level primitive actions are available, as with vanilla and AWM agents. + +Case Study: Changing Multiple Addresses We present a representative case on the shopping website: changing billing and shipping addresses after moving. As depicted in the top row in Figure 3, the vanilla agent without adaptive skills often roams into some irrelevant exploration steps, instead of sticking to the optimal route to solve the required task. It runs for minutes and exhausts the maximum steps (i.e., 50) before finishing the task. + +![](images/810aadc8162de8b670c598520349aa2a623d11538656c116f1a472d096254632.jpg) +Figure 3: Example scaled-up task of updating multiple addresses on shopping website. + +With AWM, adding textual skills in memory provides soft guidelines for agents to follow, the agent thus better sticks to the goal and finishes each part of the task (i.e., navigate to the address page, update billing/shipping address) step by step. 
Although successful, the trajectory is long, i.e., 27 steps, and still takes a few minutes to finish. + +In comparison, ASI (in Figure 3 bottom row) showcases its efficiency by using learned skills to navigate_to_address_settings and update_address_details can solve each part in one step (vs. the 3-6 steps used by AWM for these parts). Overall, ASI correctly finishes all required actions in only 4 steps, shortening the horizon by $85.2\%$ compared to AWM. + +# 5 Adapting Across Websites + +To examine whether agents can generalize with learned skills, we test agents on real-world website counterparts for some of the domains in WebArena as listed in Table 5. This experiment setup can reflect on (1) transfer across different websites of the same domain, + +and (2) transfer from simulated, sandboxed to real-world websites. + +
DomainWebArena SandboxedReal-World
shoppingOneStopMarketTarget
online forumPostMillReddit
travelOpenStreetMapGoogle Maps
+ +Table 5: Real-world in-domain website counterparts to each WebArena sandboxed website. + +For each sandbox-real website pair, we take ten information-seeking style queries (He et al., 2024) in WebArena that do not involve potential privacy leakage or unrecoverable risky actions, such as making a purchase or changing user password. We provide the task details in §C.2. We compare ASI and AWM with their programmatic and textual skills as learned in §3, as well as comparing to the vanilla static agent baseline. + +Transferring Common Skills In Figure 4, we can see how ASI can effectively reuse common skills such as search_product in the first step on the Target website. + +Incompatible Skills One challenge faced by ASI is that some prior skills become incompatible on the new website. For example, the sort_by Listings() induced on OneStopMarket selects options from a dropdown menu, yet sorting on + +the Target website opens a sidebar; despite their semantic similarity, the concrete actions in skill programs are no longer applicable. Still, we find that agents can often spot this incompatibility and rarely attempt to use these deprecated skills. + +![](images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg) +Figure 4: ASI can generalize the search_product skill but face incompatibility when sorting items. + +Adapting Skills to New Environment Although some skills induced on previous websites + +cannot be directly used on arbitrary new websites, we hypothesize that these skills can still serve as informative references on solving procedurally similar tasks or composing new skills targeted for the new website design. + +
MethodShoppingRedditMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
Vanilla80.05.440.04.863.37.4
AWM80.05.056.74.8100.06.2
ASI90.03.476.74.493.34.4
AWM + update80.05.463.35.8100.07.2
ASI + update90.03.276.74.093.34.2
+ +Table 6: Cross-website results. ASI significantly surpasses baselines in sr and # steps (with $|t| > 2$ and $p < 0.05$ ) from our analysis in §C.3. + +We thus allow agents to induce new skills or update previously acquired skills from experiences on the new website, denoted as +update entries in Table 6. We find that enabling skill update in both textual and program formats helps agent performance on new websites. Within the short online learning process (tens of examples), AWM adapts faster to the new websites, while ASI sees a more pronounced improvement in efficiency. + +# 6 Related Work + +Adaptive Digital Agents An important thread of agent-improving methods is to build adaptive agents that can autonomously self-improve from experiences. Most works focus on integrating past experiences into agent memory by collecting human annotation (Deng et al., 2024) or LM-based synthesis (Ou et al., 2024; Xu et al., 2025), especially via agent-driven exploration with instruction- (Murty et al., 2024b) or trajectory-driven (Murty et al., 2024a) approaches, offering warm starts on the websites of interest. Other works gather experiences (Wang et al., 2024b) or feedback (Qu et al., 2024) during test time, and augment them into memory through parametric channels such as supervised fine-tuning (Murty et al., 2024a), contrastive learning (Song et al., 2024), or reinforcement learning (Zhou et al., 2024b). Meanwhile, non-parametric approaches can directly augment reflections (Shinn et al., 2023), raw past experiences (Wang et al., 2023; Zheng et al., 2023), or further induced reusable workflows (Wang et al., 2024b). While these adaptive agents learn textual skills stored in memory, our ASI stores skills as verifiable and composable programs in the agent action space (i.e., skill library), thus enabling better quality and efficiency. 
+ +Skill Discovery and Learning Learning specialized skills for tasks in programmatic (Shin et al., 2019; Ellis et al., 2023; Cai et al., 2024; Wang et al., 2024a; Grand et al., 2024), embodied (Sharma et al., 2022; Wang et al., 2023; Liang et al., 2023; Sarch et al., 2024; Wong et al., 2024), and physical (Yu et al., 2023) environments has shown to success in agent performance. Particularly for digital agents built for web navigation tasks, most works focus on exploring skills offline with RL roll-outs (Gur et al., 2018; Liu et al., 2018; Putta et al., 2024; Qi et al., 2024) or LM-based prompting (Zhou et al., 2024b; Murty et al., 2024a; Patel et al., 2024). While this exploration stage could offer some supervised data to update the agent policy either parametric (Murty et al., 2024a; Patel et al., 2024) or non-parametrically (Zheng et al., 2023; Murty et al., 2024b), it often costs enormous extra computation and may suffer from the lack or mismatch in distribution with the downstream tasks at hand (Wang et al., 2024b). In contrast, our ASI does not rely on supervised data and can directly learn skills online without prior exploration. + +Web Navigation Benchmarks Digital agents have been explored across a wide range of tasks (Yao et al., 2024; Kapoor et al., 2025; Xie et al., 2024b), among which one of the most popular application being browsing and navigating through versatile websites such as shopping (Yao et al., 2022), social media communication (Zhou et al., 2024a; Koh et al., 2024), knowledge work tasks (Drouin et al., 2024), and more (Deng et al., 2024). Our work focuses on general web navigation tasks using the WebArena (Zhou et al., 2024a) benchmark, meanwhile exploring other challenging scenarios such as scaled-up activities (Yoran et al., 2024) and cross-domain generalization (Deng et al., 2024). 
+ +# 7 Conclusion and Future Discussions + +In this work, we present ASI to support web navigation agents to autonomously induce, verify, learn, and apply programmatic skills during online inference. Beyond achieving $23.5\%$ success rate and $15.3\%$ efficiency increases in general web tasks, we also showcase ASI's strengths for scaled-up web activities, thanks to the high-level action interface offered by the programmatic abstraction. Moreover, we examine skill generalizability to new, real-world websites, and find ASI still offers great efficiency while flexibly updating skills to new environments. While our work aims to offer insights on the optimal representation in agent skill acquisition, we still find multiple pieces in ASI worthy of further investigation, such as the conceptually or empirically suitable granularity of skills, the stability of the online evolving process, and the skill quality in comparison to human expert desiderata. + +# Acknowledgments + +We would like to thank Jiayuan Mao, Yueqi Song, Boyuan Zheng, and Yu Su for the insightful discussions. We thank Yiqing Xie, Xinran Zhao, and Mingqian Zheng for their helpful comments on the paper draft. Zora is supported by the CMU Presidential Fellowship and Fujitsu Research. Apurva is supported by Amazon. + +# References + +Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. 
Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bfcb4967418bf8ac142f64a4-Paper.pdf.
+Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qV83K9d5WB.
+De Chezelles, Thibault Le Sellier, Maxime Gasse, Alexandre Lacoste, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, et al. The browsergym ecosystem for web agent research. arXiv preprint arXiv:2412.05467, 2024.
+Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36, 2024.
+Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, et al. Workarena: How capable are web agents at solving common knowledge work tasks? arXiv preprint arXiv:2403.07718, 2024.
+Kevin Ellis, Lionel Wong, Maxwell Nye, Mathias Sable-Meyer, Luc Cary, Lore Anaya Pozo, Luke Hewitt, Armando Solar-Lezama, and Joshua B Tenenbaum. Dreamcoder: growing generalizable, interpretable knowledge with wake-sleep bayesian program learning. Philosophical Transactions of the Royal Society A, 381(2251):20220050, 2023.
+Yao Fu, Dong-Ki Kim, Jaekyeom Kim, Sungryull Sohn, Lajanugen Logeswaran, Kyunghoon Bae, and Honglak Lee. Autoguide: Automated generation and selection of state-aware guidelines for large language model agents. CoRR, abs/2403.08978, 2024. URL https://doi.org/10.48550/arXiv.2403.08978.
+Gabriel Grand, Lionel Wong, Matthew Bowers, Theo X. Olausson, Muxin Liu, Joshua B. Tenenbaum, and Jacob Andreas. LILO: Learning interpretable libraries by compressing and documenting code. In The Twelfth International Conference on Learning Representations, 2024. 
URL https://openreview.net/forum?id=TqYbAWKMJe. +Izzeddin Gur, Ulrich Rückert, Aleksandra Faust, and Dilek Hakkani-Tür. Learning to navigate the web. CoRR, abs/1812.09195, 2018. URL http://arxiv.org/abs/1812.09195. +Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, 2024. URL https://aclanthology.org/2024.acl-long.371/. + +Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem AlShikh, and Ruslan Salakhutdinov. Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In European Conference on Computer Vision, pp. 161-178. Springer, 2025. +Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. Visualwebarena: Evaluating multimodal agents on realistic visual web tasks. arXiv preprint arXiv:2401.13649, 2024. +Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020. +Jacky Liang, Wenlong Huang, Fei Xia, Peng Xu, Karol Hausman, Brian Ichter, Pete Florence, and Andy Zeng. Code as policies: Language model programs for embodied control. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 9493-9500. IEEE, 2023. +Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, and Percy Liang. Reinforcement learning on web interfaces using workflow-guided exploration. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=ryTp3f-0-. 
+Bodhisattwa Prasad Majumder, Bhavana Dalvi, Peter Jansen, Oyvind Tafjord, Niket Tandon, Li Zhang, Chris Callison-Burch, and Peter Clark. CLIN: A continually learning language agent for rapid task adaptation and generalization, 2024. URL https://openreview.net/forum?id=d5DGVHMsC. +Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator, 2024a. URL https://arxiv.org/abs/2410.02907. +Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. Bagel: Bootstrapping agents by guiding exploration with language, 2024b. URL https://arxiv.org/abs/2403.08140. +Tianyue Ou, Frank F. Xu, Aman Madaan, Jiarui Liu, Robert Lo, Abishek Sridhar, Sudipta Sengupta, Dan Roth, Graham Neubig, and Shuyan Zhou. Synatra: Turning indirect knowledge into direct demonstrations for digital agents at scale. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=KjNEzWRIqn. +Yichen Pan, Dehan Kong, Sida Zhou, Cheng Cui, Yifei Leng, Bing Jiang, Hangyu Liu, Yanyi Shang, Shuyan Zhou, Tongshuang Wu, and Zhengyang Wu. Webcanvas: Benchmarking web agents in online environments. In Agentic Markets Workshop at ICML 2024, 2024. URL https://openreview.net/forum?id=O1FaGasJob. +Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024. +Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024. +Zehan Qi, Xiao Liu, Iat Long Iong, Hanyu Lai, Xueqiao Sun, Wenyi Zhao, Yu Yang, Xinyue Yang, Jiadai Sun, Shuntian Yao, et al. Webrl: Training llm web agents via self-evolving online curriculum reinforcement learning. 
arXiv preprint arXiv:2411.02337, 2024. +Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve, 2024. URL https://arxiv.org/abs/2407.18219. + +Gabriel Sarch, Lawrence Jang, Michael Tarr, William W Cohen, Kenneth Marino, and Katerina Fragkiadaki. Vlm agents generate their own memories: Distilling experience into embodied programs of thought. Advances in Neural Information Processing Systems, 37: 75942-75985, 2024. +Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or rl is suboptimal. arXiv preprint arXiv:2502.12118, 2025. +Pratyusha Sharma, Antonio Torralba, and Jacob Andreas. Skill induction and planning with latent language. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, May 2022. URL https://aclanthology.org/2022.acl-long.120/. +Eui Chul Shin, Miltiadis Allamanis, Marc Brockschmidt, and Alex Polozov. Program synthesis and semantic parsing with learned code idioms. Advances in Neural Information Processing Systems, 32, 2019. +Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning, 2023. URL https://arxiv.org/abs/2303.11366. +Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for llm agents, 2024. URL https://arxiv.org/abs/2403.02502. +Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. Voyager: An open-ended embodied agent with large language models, 2023. URL https://arxiv.org/abs/2305.16291. +Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. 
In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=DCNCwamJjI. +Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024b. +Lionel Wong, Jiayuan Mao, Pratyusha Sharma, Zachary S Siegel, Jiahai Feng, Noa Korneev, Joshua B. Tenenbaum, and Jacob Andreas. Learning grounded action abstractions from language. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qJ0Cfj4Ex9. +Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. Travelplanner: A benchmark for real-world planning with language agents. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=15XQzNkAOe. +Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, et al. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. arXiv preprint arXiv:2404.07972, 2024b. +Frank F Xu, Yufan Song, Boxuan Li, Yuxuan Tang, Kritanjali Jain, Mengxue Bao, Zora Z Wang, Xuhui Zhou, Zhitong Guo, Murong Cao, et al. Theagentcompany: benchmarking llm agents on consequential real world tasks. arXiv preprint arXiv:2412.14161, 2024. +Yiheng Xu, Dunjie Lu, Zhennan Shen, Junli Wang, Zekun Wang, Yuchen Mao, Caiming Xiong, and Tao Yu. Agenttrek: Agent trajectory synthesis via guiding replay with web tutorials. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=EEgYUccwsV. +Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35:20744-20757, 2022. + +Shunyu Yao, Noah Shinn, Pedram Razavi, and Karthik Narasimhan. 
$\tau$ -bench: A benchmark for tool-agent-user interaction in real-world domains. arXiv preprint arXiv:2406.12045, 2024. +Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? arXiv preprint arXiv:2407.15711, 2024. +Wenhao Yu, Nimrod Gileadi, Chuyuan Fu, Sean Kirmani, Kuang-Huei Lee, Montserrat Gonzalez Arenas, Hao-Tien Lewis Chiang, Tom Erez, Leonard Hasenclever, Jan Humplik, brian richter, Ted Xiao, Peng Xu, Andy Zeng, Tingnan Zhang, Nicolas Heess, Dorsa Sadigh, Jie Tan, Yuval Tassa, and Fei Xia. Language to rewards for robotic skill synthesis. In 7th Annual Conference on Robot Learning, 2023. URL https://openreview.net/forum?id= SgTPdyehXMA. +Longtao Zheng, Rundong Wang, Xinrun Wang, and Bo An. Synapse: Trajectory-as-exemplar prompting with memory for computer control. In The Twelfth International Conference on Learning Representations, 2023. +Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx. +Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Erran Li. Proposer-agent-evaluator (pae): Autonomous skill discovery for foundation model internet agents. arXiv preprint arXiv:2412.13194, 2024b. + +# A Experiment Details + +# A.1 Agent Action Space + +Table 7 shows the default action space the web navigation agents we employed in all the experiments. This action space remains the same for both (i) static, vanilla agent, as well as the (ii) adaptive agent that learn textual skills in memory, i.e., AWM. + +
Action TypeDescription
noop(wait_ms)Do nothing for specified time.
click(elem)Click at an element.
hover(elem)Hover on an element.
fill(elem, value)Type into an element.
keyboard_press(key_comb)Press a key combination.
scroll(x, y)Scroll horizontally or vertically.
select_option(elem, options)Select one or multiple options.
goto(url)Navigate to a url.
go_back()Navigate to the previous page.
go_forward()Navigate to the next page.
new_tab()Open a new tab.
tab_close()Close the current tab.
tabfocus(index)Bring tab to front.
send_msg_to_user(text)Send a message to the user.
report_infeasible(reason)Notify user that instructions are infeasible.
+ +Table 7: Initial action space for agents throughout our experiments. + +# A.2 Prompts for LLM-Based Components + +# Prompt for Episode Evaluation + +You are an expert in evaluating the performance of a web navigation agent. The agent is designed to help a human user navigate a website to complete a task. Given the user's intent, the agent's action history, the final state of the webpage, and the agent's response to the user, your goal is to decide whether the agent's execution is successful or not. Please be careful of each detail and strict about the evaluation process. + +There are three types of tasks: 1. Information seeking: The user wants to obtain certain information from the webpage, such as the information of a product, reviews, map info, comparison of map routes, etc. The bot's response must contain the information the user wants, or explicitly state that the information is not available. Otherwise, e.g. the bot encounters an exception and respond with the error content, the task is considered a failure. Besides, be careful about the sufficiency of the agent's actions. For example, when asked to list the top-searched items in a shop, the agent should order the items by the number of searches, and then return the top items. If the ordering action is missing, the task is likely to fail. 2. Site navigation: The user wants to navigate to a specific page. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response. 3. Content modification: The user wants to modify the content of a webpage or configuration. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response. + +*IMPORTANT* Please be strict about the evaluation process. 
Format your response into two lines as shown below: + +Thoughts: + +Status: "success" or "failure" + +# Prompt for Episode Cleaning + +You are a helpful assistant in summarizing web browsing actions. Your task is to summarize the main action taken and the corresponding state change, from the input thought process and environmental description. + +Example input: "Let me think about this step by step: 1) I've clicked the Columns button and based on the accessibility tree, I can now see a list of available columns that can be shown/hidden. 2) Looking through the available columns in the accessibility tree, I don't see any direct column for "Number of Orders" or similar metric. 3) Since I don't see the information I need in the available columns, this suggests that: - Either this information is available through a different path - Or we need to access the Sales/Orders section to correlate this information 4) The most logical path would be to: - Go to the Sales section where we can see all orders - And then analyze which customers have the most orders 5) From the accessibility tree, I can see the "SALES" link with bid "183" in the main navigation. + +Let me click on the SALES link: This should open the Sales menu where we can access the Orders section to analyze customer order histories." + +Example output: "Clicked the SALES link to access the Orders section for customer order histories." + +# Prompt for Skill Induction + +You are a proficient software engineer. Your task is to (1) summarize reusable functions as APIs from the provided action trajectories, and (2) rewrite the trajectories using the reusable functions you generated in (1). + +For (1), from the provided examples about the same task, you job is to generate Python functions that can be reused to solve (part of) these tasks. 
The functions should have mediocre complexity: (i) containing at least three actions and not too simple (e.g., a single line of code), (ii) not too complex (e.g., more than 10 lines of code), and should be general enough to be applied to other similar tasks. The arguments to these functions should be common variables (such as strings and lists), avoid using complex inputs such as another function. + +The actions you can use are: 'click', 'fill', 'hover', 'keyboard_press', 'scroll'; 'tabfocus', 'new_tab', 'tab_close', 'go_back', 'go_forward'; 'send msg to user', 'report_infeasible', 'select_option'. Do not use other undefined actions. Do not include any try-except blocks in the functions. + +Please include 'Args', 'Returns', and 'Examples' in the function documentation. For (2), write the instruction and rewritten code of each example. Do not include the answer response or example-specific information in the rewritten code. Pay attention to whether all link IDs are available before specifying them in the generated functions. If you use 'send msg_to_user', make sure the message is decided within the function, instead of provided as an argument. + +Make sure each function contains no less than 2 steps, and no more than 5 steps; to keep the functions simple and task-oriented. You can generate zero, one, or multiple functions depending on the provided examples. + +# B Skill Induction: Analysis + +We provide more details about the skill induction process, in skill curation and reuse frequency (§B.1) and representative skill case studies (§B.2) + +# B.1 Skill Induction and Reusability + +To provide more insights on how agents curate and reuse programmatic skills, for the main experiments on WebArena, we calculate the number of examples that (i) attempt to induce a new skill, (ii) successfully induce a new skill, and (iii) reuse a previously induced skill. 
+ +As shown in Table 8, the agent reuses at least 1 skill for $42.5\%$ of the cases, which is pretty frequent across websites. Moreover, these skills are created using $7.6\%$ of the tasks, demonstrating the high scalability of our skill learning approach. + +
DomainAttempted InductionNumber of Examples
Successful InductionReuse ExamplesTotal
shopping21896180
admin3815108156
reddit241114105
map131080109
gitlab251110175
+ +Table 8: Analyzing the number of examples that attempt, induce, and reuse skills. + +# B.2 Representative Skills + +We demonstrate two representative types of skills, which (i) chain frequently-used-together actions, and (ii) involve advanced programming primitives. + +Chaining Primitive Actions The most common type of skills chains multiple frequently-used-together primitive actions (e.g., click, fill) in a certain order, to reflect a particular common usage, such as the search_product tool illustrated below. This type of skill provides an easy portal for frequent workflows. + +```python +def search_product(name: str): + click("Search textbox...") + if has_pop_window(): + click("Close") + fill("Search textbox", name) + click("Search button") +``` + +Advanced Programming Primitives Besides a sequential chaining of actions, some skills contain more advanced logics, such as if-else or for/while loops, as the two examples below. + +```python +def navigate_and_sort_category(category_id: str, subcategory_id: str | None = None): + '''"Navigate to a product category and sort items by price. +Args: + category_id: ID of the main category link + subcategory_id: Optional ID of the subcategory link, if needed +Returns: + None +Examples: + navigate_and_sort_category('1241', '1873') # PS4 under Video Games + navigate_and_sort_category('1245') # For main category only + '''' click(category_id) # Click main category +if subcategory_id: + click(subcategory_id) # Click subcategory if provided +select_option("1553", "Price") # Sort by price ascending +``` + +```python +def browse_category_byjahicationmenu_id_sequence: list): + ""Browse products by navigating through a sequence of menu IDs. + This function allows navigation through a series of menu interactions. + Args: + menu_id_sequence(list): A list of menu IDs to interact + sequentially, using hover actions followed by a click. 
+
```

```python
    Example usage:
        browse_category_by_navigation(['735', '786', '797'])
        # Navigates Home & Kitchen -> Storage & Organization -> Racks, Shelves & Drawers
    '''
    for idx, menu_id in enumerate(menu_id_sequence[:-1]):
        hover(menu_id)
    click(menu_id_sequence[-1])  # Click the final id to land in the predefined category
```

# C Scaled-Up and Cross-Website Tests

We provide the full list of tasks used in scaled-up (§4) and cross-website (§5) analyses in §C.1 and §C.2, respectively. In §C.3, we further perform significance testing to validate the findings from Table 4 and Table 6.

# C.1 Scaled-Up Tasks

Table 9, Table 10, Table 11, Table 12 and Table 13 show example scaled-up tasks studied on the shopping, admin, social forum, software development, and map websites.

InstructionCheckpointsScore
Add a wireless headphone, a water bottle, a notebook, a ground coffee, and a mug to my shopping cart.Add a wireless headphone to cart; Add a water bottle to cart; Add a notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Add the most expensive item from the video games category, the cheapest item from the Office Products category, and the most relevant coffee mug to my shopping cart.Add the most expensive item from the video games category to cart; Add the cheapest item from the Office Products category to cart; the most relevant coffee mug to my shopping cart.3
Add the cheapest wireless headphone, a water bottle, the most expensive notebook, a ground coffee, and a mug to my shopping cart.Add the cheapest wireless headphone to cart; Add a water bottle to cart; Add the most expensive notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Show me the ordered items for each cancelled order from Feb to May in 2023.Show me the 5/17/23 order; Show me the 2/24/23 order; Show me the 2/11/23 order.3
Iterative update my billing address to 231 Willow Way, Suite 100, Chicago, IL, 60601. Then, update my shipping address to 987 Sycamore Circle, Philadelphia, PA, 19102.Successfully update my billing address; Successfully update my shipping address.2
+ +Table 9: Exemplar scaled-up browsing tasks on the shopping website. + +# C.2 Cross-Website Tasks + +Table 14, Table 15, and Table 16 lists example tasks to test agent generalization abilities on shopping (OneStopMarket to Target), social forum (Postmill to Reddit), and software development (GitLab to GitHub) domains. + +# C.3 Significance Testing + +
InstructionCheckpointsScore
Tell me the number of reviews that our store received by far that mention terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.Return the correct number for terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.5
I need to contact a list of customers. Find the customer name and email with phone number 2058812302, 2137418080, 2065555555, 8015551212, and 555-229-3326.Return the correct name and email information for customers with each of the five phone numbers.5
I will need to update our webpage to create a more energetic vibe. Change the page title of '404 Not Found' to 'Bruh bro you clicked the wrong page', the page title of 'Enable Cookies' to 'Cookie monster coming to your place', the page title of 'Home Page' page to 'This is the home page!!', the page with title 'Privacy Policy' to 'No privacy policy is needed is this dystopian world', and lastly, change the page 'About Us' to 'Secret'.Change the page title correctly for each of the five pages.5
I need to generate a bunch of report to show to the store manager in an hour. Could you help me generate a sales order report for the last month, over the last 45 days, and for Q1? I'll also need a refund report for last year, and a tax report for this year. Today is 3/15/2023.Generate a sales report for 2/1/2023-2/29/2023; generate a sales report for 1/29/2023-3/15/2023; generate a sales report for 1/1/2023-3/15/2023; Generate a refund report for 1/1/2022-12/31/2022; Generate a tax report for 1/1/2023-3/15/2023.5
Tell me the SKU of products that have 10 units, 3 units, and 0 units left. Also, give me the product names that have 2-3 units left.Return the correct SKU for the first three questions; return the correct product names for the last question.4
+ +Scaled-Up Tasks We conduct t-tests between (i) ASI and AWM, (ii) ASI and VANILLA agent. From the results in Table 17, we find the advantage of ASI in success rate and efficiency improvements, when comparing to both AWM and VANILLA agents, are statistically significant, as indicated by all + +t-statistics with absolute values over 2 and p-value below 0.05. + +Table 10: Exemplar scaled-up browsing tasks on the shopping admin website. + +
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-2.36010.02262.76640.0068
ASI vs. VANILLA-4.09220.00022.19830.0296
+ +Cross-Web Tasks We conduct similar significance testing on cross-web tasks and report the results in Table 18. While ASI does not significantly outperform AWM in success rate, given the presumably greater flexibility of textual workflows, ASI still exhibits significant advantages on + +the efficiency side. Furthermore, comparing ASI to static VANILLA agents, ASI achieves significant improvements in both success rates and efficiency (i.e., reduced number of steps), suggested by $|t| > 2$ and $p < 0.05$ . + +Table 17: Results of significance testing on ASI's advantages for scaled-up web tasks. + +
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-1.39800.16732.12380.0378
ASI vs. VANILLA-3.59840.00072.57920.0125
+ +Table 18: Results of significance testing on ASI's advantages for cross-web tasks. + +
InstructionCheckpointsScore
I'm planning to organize multiple meetings in the next few months. Help me post notices on virtual meetings for the little women on Apr 10th, for Harry Potter in May 15th, and for Jane Eyre in Jan 30th, in the most suitable forums in PostMill.Post Apr 10th meetup; Post about May 15th meetup; Post Jan 30th meetup. All in book-related forums.3
Could you tell me all forums with names related to computer science?must include: deeplearning (1 pt), Machine-Learning (1 pt); optionally (get 1 score if include any): science, askscience, technology.3
Find the most relevant posts about jerseycity, newjersey, and nyc; and tell me how different they are.Correctly find post about jerseycity; Correctly find post about newjersey; Correctly find post about nyc; Answer how different they are.4
Thumbs down the top-2 posts in jerseycity, newjersey, and nyc forums, I don't like them.Thumbs down the top-2 posts in the jerseycity forum; Thumbs down the top-2 posts in the newjersey forum; Thumbs down the top-2 posts in the nyc forum.3
Reply "Thank you! This is super helpful!" to three posts about long-distance relationship advice.Reply to three posts with the correct message. Need to be relevant to long-distance relationship advice.3
+ +Table 11: Exemplar scaled-up tasks on the Postmill website. + +
InstructionCheckpointsScore
Display the list of issues in the a11yproject/a11yproject.com repository that have labels related to 'help needed', and assign the most recent one to the top contributor of this repository.Display the help-wanted issues; find the top contributor; assign him to the most recent help-needed issue.3
Set up a new, empty repository with the name agent_skill_induction, and create a MIT license file. Then, invite Abishek and Vinta as collaborators.Create a new repository with given name; Create a MIT license inside; Invite both collaborators.3
Start a private project web_agent.android xl with Android template and add primer, convexegg, abishek as members.Create the repository private and with Android template; Invite all three people as members.2
Add the following users to repo a11y-webring.club as developer: [abisubramanya27, lahwaacz], and [yjlou, a11yproject] as maintainer.Add abisubramanya27 and lahwaacz as developers; Add yjlou and a11yproject as maintainers.2
Add the following users [abisubramanya27, lahwaacz, yjlou, a11yproject] to repo a11y-webring.club, make sure to assign them different roles.Add abisubramanya27 with role 1; Add lahwaacz with role 2; Add yjlou with role 3; Add a11yproject as role 4. Role 1-4 need to be all different.4
+ +Table 12: Exemplar scaled-up tasks on the GitLab website. + +
InstructionCheckpointsScore
Search for the closest restaurants, cafes, parking, and banks to Carnegie Mellon University on the map.Return the closest restaurants; Return the closest cafes; Return the closest parking; Return the closest banks.4
I will need to go to multiple places from Carnegie Mellon University today, including the Univ of Pittsburgh, UPMC shady-side, the Schenley park, and Squirrel Hill. Could you should me the driving route to all those places?Show me driving route from CMU to UPitt; Show me driving route from CMU to UPMC; Show me driving route from CMU to Schenley Park; Show me driving route from CMU to Squirrel Hill.4
Show me the route of driving from CMU to University of Pittsburgh, then walking to the Schenley Park; next, bike to UPMC shadyside, and walk to Squirrel Hill after that.Show me CMU → Upitt route by car; Show me Upitt → Schenley Park route by foot; Show me Schenley Park → UPMC route by bike; Show me UPMC → Squirrel Hill route by foot.4
Check if the Univ of Pittsburgh, UPMC shadyside, schenley park, and squirrel hill can be reached within one hour by walking, if departing from Carnegie Mellon University.Return yes to route 1, route 2, route 3, and route 4.4
Tell me the coordinates of Univ of Pittsburgh, UPMC shadyside, schenley park, squirrel hill, and CMU in DD format.Return the coordinates of each of the four places.4
+ +Table 13: Exemplar scaled-up tasks on the Map website. + +
InstructionCheckpointsScore
Show me the options for Canon photo printer?Return the correct search result.1
I have a lot of Nintendo Switch game cards now, help me find the best storage option to fit all 11 cards.Return one valid product.1
What is the price range for beauty products?Return the correct price range.1
Show me products under $25 for woman shoesDisplay correct products.1
Show the least expensive shoe storage with a minimum storage capacity of 12 pairs.Display correct products.1
+ +Table 14: Exemplar shopping tasks on the target website. + +
InstructionCheckpointsScore
Tell me the names of books recommended in the latest five posts in the books forumFind the r/books forum; Find the most recent 5 posts; Give the correct answer.3
Tell me the titles of the 5 most recent posts about little women in the books forumFind the r/books forum; Find little women related posts; Sort the posts by newest.3
What are the recommended products for noise-canceling headphones within a bud-get of $200 in r/headphonesFind the r/headphones forum; Correctly search with noise-canceling, under $200 re-requirements; Return a valid headphone rec-ommendation.3
Find 3 pieces of advices about deal with long-distance relationships in a subreddit for relations.Navigate to a forum about relations; find at least 3 pieces of advice from relevant posts.2
Find if there are any jeep wrangler meetups. If so, when and where?Search in jeep wrangler related forums; Re-turn a valid answer based on the search result.2
+ +Table 15: Exemplar social forum tasks on the reddit website. + +
InstructionCheckpointsScore
Tell me the full address of all international airports that are within a driving distance of 30 miles to Carnegie Mellon UniversityReturn Pittsburgh International Airport.1
I will arrive Pittsburgh Airport soon. Provide the name of a Hilton hotel in the vicinity, if available. Then, tell me the the walking distance to the nearest supermarket own by a local company from the hotel.Show me the hotels; Find a nearby supermarket; Show me the walking route from the hotel to the supermarket.3
Show me the walking route from nearby hotels to CMU, Pittsburgh that take at most 5 minutes?Find a hotel that meets the walking time requirement; Show me the walking route.2
I am at CMU Pittsburgh, how long it takes to the nearest USPS postal office with different transportation methods?Return travel time by car, by foot, by bus, and by bike.4
Tell me the coordinates of Carnegie Mellon Cafe in DD format.Return the correct coordinates.1
+ +Table 16: Exemplar social forum tasks on the Google Maps website. \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06821/images/1148f878af9686e2ea67d104ffeb29c5fefb949586578f2979b3fc083583026f.jpg b/data/2025/2504_06xxx/2504.06821/images/1148f878af9686e2ea67d104ffeb29c5fefb949586578f2979b3fc083583026f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0cdac1549d108d898e4b7e18c0742fb1c4fd109 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/1148f878af9686e2ea67d104ffeb29c5fefb949586578f2979b3fc083583026f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a0e29e2d29bd2349fbbb6791416f6f48d3ba8ec2360e55690fac45baf0bff73 +size 40647 diff --git a/data/2025/2504_06xxx/2504.06821/images/1aaa066e75e3a385f9f9d19b3acdea81acd51a20e825389f3c2d58675f3d0989.jpg b/data/2025/2504_06xxx/2504.06821/images/1aaa066e75e3a385f9f9d19b3acdea81acd51a20e825389f3c2d58675f3d0989.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a388f3fb44337d5c9d8d015d8f7dc19d1ec8dc5c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/1aaa066e75e3a385f9f9d19b3acdea81acd51a20e825389f3c2d58675f3d0989.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:401efc68ab871c34efdf23bce2712619275061f4d3678793af71599f4bd61b8e +size 87509 diff --git a/data/2025/2504_06xxx/2504.06821/images/1ff0af4eb9d2ef32997f69eebd000de698114a0aa8fdfd862945d1007cd5f3ec.jpg b/data/2025/2504_06xxx/2504.06821/images/1ff0af4eb9d2ef32997f69eebd000de698114a0aa8fdfd862945d1007cd5f3ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46783d979958f62e14dbe2f9aa458fed1e357560 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/1ff0af4eb9d2ef32997f69eebd000de698114a0aa8fdfd862945d1007cd5f3ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6b02b67abfb46f694d135abeda9696d86a6b968616fa22d1a6d6d4e3335bb0f4 +size 178045 diff --git 
a/data/2025/2504_06xxx/2504.06821/images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg b/data/2025/2504_06xxx/2504.06821/images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6a52f9ae6b431886115e8e9bb83b445cefb7bf0c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74a79b8af8cdceade6f1cfbe64e530982fa09001c66f6e2403dc752264fe0d0a +size 53165 diff --git a/data/2025/2504_06xxx/2504.06821/images/32cefbcbef9f4c017763f3d35ba213c0ab16333e67dde8da90f90a5625d1eeb9.jpg b/data/2025/2504_06xxx/2504.06821/images/32cefbcbef9f4c017763f3d35ba213c0ab16333e67dde8da90f90a5625d1eeb9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72d2143687f8a8f021a8f82c3a4af86449a518c8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/32cefbcbef9f4c017763f3d35ba213c0ab16333e67dde8da90f90a5625d1eeb9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ed95a0c28f890f0df44604f980d37ebfa0ea1c89b6ab3a2ede70e3ccf5ef26c +size 165638 diff --git a/data/2025/2504_06xxx/2504.06821/images/32d2941bc49fc2c302ec4d75a44393a68d9505c660e98d27b329443e04fd912a.jpg b/data/2025/2504_06xxx/2504.06821/images/32d2941bc49fc2c302ec4d75a44393a68d9505c660e98d27b329443e04fd912a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a56f0e398ae6529bf58b761c28897afc0770f4f1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/32d2941bc49fc2c302ec4d75a44393a68d9505c660e98d27b329443e04fd912a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9cfbc8501393636ec2d8a2a9594d65c3e5a2b95f5245b1fb5f7fed21ac7eb3b7 +size 34177 diff --git a/data/2025/2504_06xxx/2504.06821/images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg 
b/data/2025/2504_06xxx/2504.06821/images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3dbecc3901571a5ee1532a27dbf3da41bd93e27 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:20302cd6e4e476964f47efe59d859354b3afd1beee859f06cb9d11c0b7e87589 +size 60067 diff --git a/data/2025/2504_06xxx/2504.06821/images/495de9455d437d5fb7075c1e7ad702e4817af15dbe99fd41fe407351a3037267.jpg b/data/2025/2504_06xxx/2504.06821/images/495de9455d437d5fb7075c1e7ad702e4817af15dbe99fd41fe407351a3037267.jpg new file mode 100644 index 0000000000000000000000000000000000000000..875dbb23a9e1ddd47a3a8eb81bd0e0f6d2719f60 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/495de9455d437d5fb7075c1e7ad702e4817af15dbe99fd41fe407351a3037267.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64f664cd39358e3502692bb2fc315908da6b437b664fd67f826eb53874225122 +size 108356 diff --git a/data/2025/2504_06xxx/2504.06821/images/4f88725b8dc9876aae1dde5be6e27efcb666ca61c19b6ac75e2549b3303cabd2.jpg b/data/2025/2504_06xxx/2504.06821/images/4f88725b8dc9876aae1dde5be6e27efcb666ca61c19b6ac75e2549b3303cabd2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..10551d81d92b7b29fdd23dc3ac864355ebf376e3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/4f88725b8dc9876aae1dde5be6e27efcb666ca61c19b6ac75e2549b3303cabd2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:203c11be61a9c11eaf075f0dfb8140750aadf0d2c8a4c42742dd4654d3b0519b +size 47535 diff --git a/data/2025/2504_06xxx/2504.06821/images/52294272a8d6f283c35d590bbc871337b4ac1a2662ec382ff8567094c2e77506.jpg b/data/2025/2504_06xxx/2504.06821/images/52294272a8d6f283c35d590bbc871337b4ac1a2662ec382ff8567094c2e77506.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..e07e9e8fa986c3a818629eeb74b9bf16a5b8f3bc --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/52294272a8d6f283c35d590bbc871337b4ac1a2662ec382ff8567094c2e77506.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:166b845c14c3d78694a0392d875785012a061abfba0a7e1b0cf0327f94b8de8f +size 77718 diff --git a/data/2025/2504_06xxx/2504.06821/images/744d7a242930e30176d7727b1c46da76ad345db665e2c589a8713ff65ca7e1c6.jpg b/data/2025/2504_06xxx/2504.06821/images/744d7a242930e30176d7727b1c46da76ad345db665e2c589a8713ff65ca7e1c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..441dc03ad4a9bf07f36aafa0c27162cd0fe2218d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/744d7a242930e30176d7727b1c46da76ad345db665e2c589a8713ff65ca7e1c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:97282c876c298ba56fe4a44982fed80ef3290b847b8ae791105eadb76b0a9ac5 +size 174128 diff --git a/data/2025/2504_06xxx/2504.06821/images/802ab4997fb966f03cc6360d294c6427efd24e884407d8c1be5c83e3522e3a9a.jpg b/data/2025/2504_06xxx/2504.06821/images/802ab4997fb966f03cc6360d294c6427efd24e884407d8c1be5c83e3522e3a9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..51743487ecba8fe451a607c69ed85df79e81e1e2 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/802ab4997fb966f03cc6360d294c6427efd24e884407d8c1be5c83e3522e3a9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:35bf33f3495fa6abf858c4377c55b641adf09286345b83b03a3e2b69150d6ee0 +size 223260 diff --git a/data/2025/2504_06xxx/2504.06821/images/8109e663c1fd04718516750d3b79702b899db5b9302f97f72deea8dfb48e50d0.jpg b/data/2025/2504_06xxx/2504.06821/images/8109e663c1fd04718516750d3b79702b899db5b9302f97f72deea8dfb48e50d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd08357ab81ea4d7d766c4ca574f796f045d7969 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06821/images/8109e663c1fd04718516750d3b79702b899db5b9302f97f72deea8dfb48e50d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07fa4b2e996aed3a7e41b93a85ba219d331f499b44b020b6ea660123091c04a4 +size 22554 diff --git a/data/2025/2504_06xxx/2504.06821/images/810aadc8162de8b670c598520349aa2a623d11538656c116f1a472d096254632.jpg b/data/2025/2504_06xxx/2504.06821/images/810aadc8162de8b670c598520349aa2a623d11538656c116f1a472d096254632.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1581dcabebe99273e6de6aef85d0a50454dfecf1 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/810aadc8162de8b670c598520349aa2a623d11538656c116f1a472d096254632.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2aca15843d69c8938756a33530acf8563b717ad07fbd440ba9504a4d806d5797 +size 115767 diff --git a/data/2025/2504_06xxx/2504.06821/images/a48f320b1a2f55cba3570d504b013d6a4c5591b774509c0aaa4f65a44c16948a.jpg b/data/2025/2504_06xxx/2504.06821/images/a48f320b1a2f55cba3570d504b013d6a4c5591b774509c0aaa4f65a44c16948a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d37e52af83e11189f3f0dcc3e01e5f9067b702d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/a48f320b1a2f55cba3570d504b013d6a4c5591b774509c0aaa4f65a44c16948a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ddc43d9f276cb05317a11487e9d5682d8233f7e7bda05f7a0103a34be7645e8 +size 22607 diff --git a/data/2025/2504_06xxx/2504.06821/images/a5be42cba2d45ce05778e8e19e0b347f9774925e80aff099ef819a4fa5a5e123.jpg b/data/2025/2504_06xxx/2504.06821/images/a5be42cba2d45ce05778e8e19e0b347f9774925e80aff099ef819a4fa5a5e123.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1dfb0b62408737204dda286c2c48e132bc269fc8 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/a5be42cba2d45ce05778e8e19e0b347f9774925e80aff099ef819a4fa5a5e123.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:aec9afa9912b966918a3b403f3ca369a885578ec690b8471a46d850f04552428 +size 30891 diff --git a/data/2025/2504_06xxx/2504.06821/images/b55e238f84562ee1941db044ca264e0042cc5add90554d1fc6dd8b5509f25b8a.jpg b/data/2025/2504_06xxx/2504.06821/images/b55e238f84562ee1941db044ca264e0042cc5add90554d1fc6dd8b5509f25b8a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0248e2ae8d7607bb7dae67f858b1d7df9a5da2e0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/b55e238f84562ee1941db044ca264e0042cc5add90554d1fc6dd8b5509f25b8a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc3116de45bbecda0bd44d8f9daf83039e2d19741fa59b582ba7b8de4616188d +size 23467 diff --git a/data/2025/2504_06xxx/2504.06821/images/d71556dcaf97c5216a9666f1fe442438dcc1304ce600f3c7a086e9010778a15b.jpg b/data/2025/2504_06xxx/2504.06821/images/d71556dcaf97c5216a9666f1fe442438dcc1304ce600f3c7a086e9010778a15b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfb97e024d32e3e6806ffce9d123e4e9510c838d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/d71556dcaf97c5216a9666f1fe442438dcc1304ce600f3c7a086e9010778a15b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5633ea017b6c40d91098bc1a0b296b04fe4e70f42490eea747239c7436769a6a +size 127659 diff --git a/data/2025/2504_06xxx/2504.06821/images/e184163a935c302d2edc322fe07a755bc00b82f9292ce321df60b7e979ab1931.jpg b/data/2025/2504_06xxx/2504.06821/images/e184163a935c302d2edc322fe07a755bc00b82f9292ce321df60b7e979ab1931.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4264ee512befda83aa0c294105b9df8647b33f0d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/e184163a935c302d2edc322fe07a755bc00b82f9292ce321df60b7e979ab1931.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0472728905e091f15c482602510bb62fa40d30140ea9ddb6d0523ced24b076e7 +size 62451 diff --git 
a/data/2025/2504_06xxx/2504.06821/images/f55cd46683e3679b683a331e175b0d050927180d5bf9ae39211680600432ba2f.jpg b/data/2025/2504_06xxx/2504.06821/images/f55cd46683e3679b683a331e175b0d050927180d5bf9ae39211680600432ba2f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b9e9ef70ab64f09451e8edda06228e56a67842c5 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/f55cd46683e3679b683a331e175b0d050927180d5bf9ae39211680600432ba2f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:11e3381e5463735bf8309b3aba9dbae41b9c1c307a0fe604847ae27289e37323 +size 162587 diff --git a/data/2025/2504_06xxx/2504.06821/images/f7a8b6011454d378ab9e8ea447740255b4e8ead0c6b591b4e2c4a6fda28334ed.jpg b/data/2025/2504_06xxx/2504.06821/images/f7a8b6011454d378ab9e8ea447740255b4e8ead0c6b591b4e2c4a6fda28334ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..59a3171402cf697b6201286b6ef9f5187fabe014 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/f7a8b6011454d378ab9e8ea447740255b4e8ead0c6b591b4e2c4a6fda28334ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:671ce32acf78243f9145fc0d0e18451959d30604a16b1089ddd7277a2f959f52 +size 131039 diff --git a/data/2025/2504_06xxx/2504.06821/images/fb0d1499b20375bbf7fa5c5dc65ac574e4f19e2c831ae86c1eae513719edaffe.jpg b/data/2025/2504_06xxx/2504.06821/images/fb0d1499b20375bbf7fa5c5dc65ac574e4f19e2c831ae86c1eae513719edaffe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dde8eade4162c30771bde2b7a4373e35b3713640 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/images/fb0d1499b20375bbf7fa5c5dc65ac574e4f19e2c831ae86c1eae513719edaffe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:46bb6750286add31567968d9e3058b831a587c451de160994486f68e9e94ead9 +size 20651 diff --git a/data/2025/2504_06xxx/2504.06821/layout.json b/data/2025/2504_06xxx/2504.06821/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..6f93cff3e886b3ca01bcf3f150320b92c22b4280 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06821/layout.json @@ -0,0 +1,11389 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 79, + 424, + 97 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 79, + 424, + 97 + ], + "spans": [ + { + "bbox": [ + 105, + 79, + 424, + 97 + ], + "type": "text", + "content": "Inducing Programmatic Skills for Agentic Tasks" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 116, + 438, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 116, + 438, + 129 + ], + "spans": [ + { + "bbox": [ + 110, + 116, + 438, + 129 + ], + "type": "text", + "content": "Zora Zhiruo Wang Apurva Gandhi Graham Neubig Daniel Fried" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 112, + 129, + 237, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 129, + 237, + 140 + ], + "spans": [ + { + "bbox": [ + 112, + 129, + 237, + 140 + ], + "type": "text", + "content": "Carnegie Mellon University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 140, + 329, + 151 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 140, + 329, + 151 + ], + "spans": [ + { + "bbox": [ + 112, + 140, + 329, + 151 + ], + "type": "text", + "content": "{zhiruow,apurvag,gneubig,dfried}@cs.cmu.edu" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 280, + 167, + 331, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 167, + 331, + 180 + ], + "spans": [ + { + "bbox": [ + 280, + 167, + 331, + 180 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "spans": [ + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "text", + "content": "To succeed in common 
digital tasks such as web navigation, agents must carry out a variety of specialized tasks such as searching for products or planning a travel route. To tackle these tasks, agents can bootstrap themselves by learning task-specific skills online through interaction with the web environment. In this work, we demonstrate that programs are an effective representation for skills. We propose agent skill induction (ASI), which allows agents to adapt themselves by inducing, verifying, and utilizing program-based skills on the fly. We start with an evaluation on the WebArena agent benchmark and show that ASI outperforms the static baseline agent and its text-skill counterpart by " + }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "inline_equation", + "content": "23.5\\%" + }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "inline_equation", + "content": "11.3\\%" + }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "text", + "content": " in success rate, mainly thanks to the programmatic verification guarantee during the induction phase. ASI also improves efficiency by reducing " + }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "inline_equation", + "content": "10.7 - 15.3\\%" + }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "text", + "content": " of the steps over baselines, by composing primitive actions (e.g., click) into higher-level skills (e.g., search_product). We then highlight the efficacy of ASI in remaining efficient and accurate under scaled-up web activities. Finally, we examine the generalizability of induced skills when transferring between websites, and find that ASI can effectively reuse common skills, while also updating incompatible skills to versatile website changes." 
+ }, + { + "bbox": [ + 140, + 193, + 471, + 395 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 414, + 196, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 414, + 196, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 414, + 196, + 426 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 436, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 506, + 548 + ], + "type": "text", + "content": "To achieve success in common digital tasks such as web navigation, it is essential for agents to be able to perform a variety of specialized tasks such as searching for products on a shopping website (Yao et al., 2022; Deng et al., 2024) or finding a driving route on the map (Zhou et al., 2024a; Xie et al., 2024a). While one source for agents to learn such tasks is demonstrations annotated by humans (Deng et al., 2024) or synthesized with large language models (LMs) on websites of interest (Murty et al., 2024b;a), this can be a challenging offline learning procedure given the broad range of website domains and functionalities, especially for the collected demonstrations to match or cover the distribution of tasks queried at inference time (Zhou et al., 2024b); not to mention the limitations in resources to collect abundant high-quality data at ease (Pan et al., 2024)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 552, + 507, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 507, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 507, + 663 + ], + "type": "text", + "content": "Instead of learning from demonstrations offline, an alternative way is to learn these tasks directly online from test queries to prevent potential distribution mismatch between demonstration and downstream tasks (Levine et al., 2020). Some works propose to have agents induce casual abstractions (Majumder et al., 2024), single-state guidelines (Fu et al., 2024), or multi-step procedural workflows (Sarch et al., 2024; Wang et al., 2024b) as a form of intermediate knowledge to augment agent memory via non-parametric approaches (Brown et al., 2020). Nonetheless, most existing approaches represent this knowledge in text, offering limited quality and verification guarantees. In this work, we propose that executable programs are effective representations for intermediate skill acquisition, given their verifiability and composability advantages (Setlur et al., 2025)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 667, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 667, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 667, + 506, + 715 + ], + "type": "text", + "content": "We present ASI, namely agent skill induction (§2), that induces and applies programmatic skills along the process of solving user web navigation queries. More concretely, given a natural language (NL) query, the agent first generates an action trajectory attempting to solve the task using built-in, primitive actions such as click and scroll. 
The agent then" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 719, + 331, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 719, + 331, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 719, + 331, + 733 + ], + "type": "text", + "content": "1https://github.com/zorazrw/agent-skill-induction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 14, + 218, + 37, + 574 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 218, + 37, + 574 + ], + "spans": [ + { + "bbox": [ + 14, + 218, + 37, + 574 + ], + "type": "text", + "content": "arXiv:2504.06821v2 [cs.CL] 29 Aug 2025" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": "induces higher-level skills (e.g., search_product(name)) that wrap primitive actions or prior skills as executable programs, accompanied with corresponding test trajectories to verify their quality. 
Verified skills are then incorporated into the agent action space and can be directly called to solve future tasks with similar procedures, as depicted in Figure 1 (bottom)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "text", + "content": "We first evaluate ASI on the WebArena benchmark (Zhou et al., 2024a) (§3) and demonstrate that our online, adaptive ASI surpasses its static agent baseline without adaptive components by " + }, + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "inline_equation", + "content": "23.5\\%" + }, + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "text", + "content": " in success rate. To validate the advantage of using programmatic representations for skills, we further compare to an adaptive agent, AWM (Wang et al., 2024b), that represents skills in memory as non-executable texts (Figure 1 top); we find ASI scores " + }, + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "inline_equation", + "content": "11.3\\%" + }, + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "text", + "content": " higher success rate by employing verifiable, programmatic skills (Figure 1 bottom). Beyond the correctness aspect, the task-solving procedures by ASI-supported agents are " + }, + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "inline_equation", + "content": "10.7 - 15.3\\%" + }, + { + "bbox": [ + 104, + 132, + 279, + 364 + ], + "type": "text", + "content": " more efficient than the baseline approaches, mainly because of the action space abstraction and composition enabled by the programmatic skill representation." 
+ } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 284, + 133, + 503, + 319 + ], + "blocks": [ + { + "bbox": [ + 284, + 133, + 503, + 319 + ], + "lines": [ + { + "bbox": [ + 284, + 133, + 503, + 319 + ], + "spans": [ + { + "bbox": [ + 284, + 133, + 503, + 319 + ], + "type": "image", + "image_path": "e184163a935c302d2edc322fe07a755bc00b82f9292ce321df60b7e979ab1931.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 282, + 325, + 504, + 361 + ], + "lines": [ + { + "bbox": [ + 282, + 325, + 504, + 361 + ], + "spans": [ + { + "bbox": [ + 282, + 325, + 504, + 361 + ], + "type": "text", + "content": "Figure 1: Online adaptive agent that induces and reuses programmatic skills as actions (bottom), as opposed to adding textual skills in memory (top)." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 368, + 506, + 437 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 368, + 506, + 437 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 506, + 437 + ], + "type": "text", + "content": "We further stress test ASI on scaled-up scenarios (§4) that require substantially longer-horizon trajectories to complete the task. Across various domains such as shopping and social forums, we find the efficiency advantage offered by ASI is more pronounced, reducing action trajectories by 9.5 and 5.6 average steps compared to static and text-form agents. Facilitated by this improved procedural efficiency and planning, we find that ASI agent adheres to the optimal trajectory better and achieves tasks " + }, + { + "bbox": [ + 104, + 368, + 506, + 437 + ], + "type": "inline_equation", + "content": "20.7 - 38.9\\%" + }, + { + "bbox": [ + 104, + 368, + 506, + 437 + ], + "type": "text", + "content": " more correctly." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 440, + 507, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 440, + 507, + 509 + ], + "spans": [ + { + "bbox": [ + 104, + 440, + 507, + 509 + ], + "type": "text", + "content": "Finally, we study agent behaviors on generalizing induced skills to other websites (§5), particularly from sandboxed, simulated websites to real-world websites of similar domains. While ASI agents effectively transfer common skills (e.g., search_product) to new websites, some skills may be incompatible with alternative website designs hence less useful. Nonetheless, ASI can quickly refine its prior skills or create new skills on new websites, indicating it allows agents to adapt online while maintaining verifiability via programs." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 511, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 511, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 511, + 504, + 536 + ], + "type": "text", + "content": "In short, ASI enhances web agent success and efficiency by inducing and applying verifiable programmatic skills, in general and longer-horizon tasks, even across varied websites." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 550, + 246, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 550, + 246, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 246, + 565 + ], + "type": "text", + "content": "2 Agent Skill Induction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 572, + 506, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 572, + 506, + 607 + ], + "spans": [ + { + "bbox": [ + 104, + 572, + 506, + 607 + ], + "type": "text", + "content": "In this section, we first lay out the web agent problem setup (§2.1) and introduce online, self-adaptive agents (§2.2). 
We then describe the core component of ASI—programmatic skill induction and verification (§2.3)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 620, + 331, + 633 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 620, + 331, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 620, + 331, + 633 + ], + "type": "text", + "content": "2.1 Problem Statement: Online Adaptive Agent" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": "For the scope of this work, we focus on language model (LM) based agents, where each agent policy consists of an LM backbone " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": ", a memory " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": ", and a skill library " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": ", as illustrated in Figure 1 top and bottom. In the implementation, the memory " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": " and the skill library " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": " are provided as input context to the LM backbone. 
We denote the agent policy as " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathcal{L}}(\\cdot|\\mathcal{M},\\mathcal{A})" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathcal{L}}" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": " for short. We focus on the web browser environment defined by a transition function " + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(s'|s,a)" + }, + { + "bbox": [ + 104, + 637, + 504, + 706 + ], + "type": "text", + "content": " that models the change in the webpage after an action." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": "We focus on an online adaptation scenario where we have access to a sequence of NL queries " + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "inline_equation", + "content": "Q = \\{q_{1}, q_{2}, \\dots, q_{N}\\}" + }, + { + "bbox": [ + 104, + 709, + 504, + 734 + ], + "type": "text", + "content": " specifying the tasks, and no other information such as demonstration" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + 
], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": "trajectories or ground-truth rewards are available (Wang et al., 2024a,b). For each task specified by a natural language (NL) query " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": ", the agent generates a trajectory of actions " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\\tau = (s_0, a_0, s_1, a_1, \\dots, s_{H-1}, a_{H-1}, s_H)" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " for a finite number of " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " steps. 
At each time step " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "h" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " in the horizon, the agent receives observation " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "o_h" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " from the current state " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "s_h" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": ", and generates an action " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "a_h \\in \\mathcal{A}" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " based on the observations and actions so far, via " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathcal{L}}(o_{0:h}, a_{0:h-1}; \\mathcal{M}, \\mathcal{A}) \\to a_h" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": ". The generated action will be executed on the environment and incurs a state change " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\\mathcal{T}(s_h, a_h) \\to s_{h+1}" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": ". 
This observe-act loop continues for " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "H" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " steps until the task reaches a task-terminating condition, such as the agent generating a termination action (e.g., send msg_to_user) or the horizon reaches a pre-determined maximum number of steps " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "h = H_{max}" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": ". We denote each pair of query and trajectory " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "(q, \\tau) := e" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " as an episode " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": ". Agents can update the content in " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 82, + 506, + 204 + ], + "type": "text", + "content": " and reuse them across episodes." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 219, + 246, + 231 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 246, + 231 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 246, + 231 + ], + "type": "text", + "content": "2.2 Inducing Reusable Skills" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "spans": [ + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "content": "To realize online adaptive agents, one common approach is to induce skills from correct trajectories to update the agent (Wang et al., 2024b). But since ground-truth rewards are unavailable, an LLM-based evaluator " + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "inline_equation", + "content": "V_{\\mathcal{L}}" + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "content": " is often used to judge the correctness of episodes. 
Formally, from the total of " + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "content": " episodes throughout the online process " + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\{e^1,\\dots ,e^N\\} \\coloneqq \\mathcal{E}" + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "content": ", we employ an LM-based evaluator " + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "inline_equation", + "content": "V_{\\mathcal{L}}(e)\\to 0 / 1" + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "content": " to filter out the episodes predicted as correct " + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_V = \\{e_i\\in \\mathcal{E}|V_{\\mathcal{L}}(e_i) = 1,i\\in \\{1,\\dots ,N\\} \\}" + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "content": " and perform skill induction only on " + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "inline_equation", + "content": "\\mathcal{E}_V" + }, + { + "bbox": [ + 104, + 237, + 504, + 307 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "spans": [ + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": "Central to our adaptive agents is an induction component " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": " that enables the adaptivity of agents, which can be rule-based (Ellis et al., 2023; Grand et al., 2024) or instantiated by an LM " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "I(\\cdot |LM)" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": " (Wang et al., 2024b); we follow the latter for its better performance and use " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": " to represent the module for simplicity. 
For online adaptive agents " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathcal{L}}" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": ", to induce skills, " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": " is instructed to take in one filtered episode " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": " and output one or more pieces of desired skills " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "D = \\{d\\}" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "I(e)\\to \\mathcal{D}" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": ". Following AWM (Wang et al., 2024b), we update the agent in non-parametric ways that incorporate the induction outcome " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "I(e_t)\\rightarrow d_t" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": " into the agent, instead of updating the parameters of the underlying LM backbone " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "\\mathcal{L}" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": " for agent policy " + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathcal{L}}" + }, + { + "bbox": [ + 104, + 311, + 506, + 401 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 404, + 504, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 404, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 404, + 504, + 450 + ], + "type": "text", + "content": "Unlike AWM which represents skills in free-form text representations and can only augment agent memory via " + }, + { + "bbox": [ + 104, + 404, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{M}_t \\cup \\{d_t\\} \\to \\mathcal{M}_{t+1}" + }, + { + "bbox": [ + 104, + 404, + 504, + 450 + ], + "type": "text", + "content": " (Figure 1 top), we introduce ASI that represents skills as executable python programs, and directly integrate skills into the agent action space instead, via " + }, + { + "bbox": [ + 104, + 404, + 504, + 450 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_t \\cup \\{d_t\\} \\to \\mathcal{A}_{t+1}" + }, + { + "bbox": [ + 104, + 404, + 504, + 450 + ], + "type": "text", + "content": " (Figure 1 bottom)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 464, + 332, + 477 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 332, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 332, + 477 + ], + "type": "text", + "content": "2.3 Inducing and Verifying Programmatic Skills" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 483, + 504, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 483, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 483, + 504, + 539 + ], + "type": "text", + "content": "To improve the induction quality, we propose a change in representation from free-form text to executable programs, which offers advantages in correctness and efficiency. 
For one, the program format enables ready verification on skill correctness by executing them; for another, skill programs abstract multiple lower-level actions into a higher-level function call, thus agents can solve tasks in fewer steps without tackling tricky low-level details." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 544, + 506, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 544, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 544, + 506, + 622 + ], + "type": "text", + "content": "Inducing Programmatic Skills We first clean the input episodes to ensure the induction quality. We remove all the steps that cause execution errors such as invalid argument format, to keep these invalid actions from distracting agent predictions. Furthermore, noticing the long and possibly redundant thought process generated by agents along with each action, we simplify each thought text paragraph into a short one-sentence description (e.g., \"Clicked the directions button to access the route planning feature\") using LM, effectively reducing the thought content from 87.9 to 13.4 tokens per step." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "text", + "content": "Given a clean input episode " + }, + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "inline_equation", + "content": "e" + }, + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "text", + "content": ", we now prompt the induction module " + }, + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "text", + "content": " to produce one or more program functions to represent reusable skills " + }, + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{d\\}" + }, + { + "bbox": [ + 104, + 626, + 506, + 683 + ], + "type": "text", + "content": " as executable programs. As exemplified in Figure 2, given the input episode on the left side, the induction module first produces two skills open_marketreviews() and searchreviews(search_box_id, search_button_id, search_term) in the form of callable program functions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Skill Verification With the programmatic nature of ASI's skills, we can readily verify their correctness by executing them and checking if tasks can be solved successfully. 
While a naive way is to query the agent with the same NL query and allow it to use newly induced skill actions, we find agents may not always use new skills due to the large search space of" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 119, + 68, + 492, + 182 + ], + "blocks": [ + { + "bbox": [ + 119, + 68, + 492, + 182 + ], + "lines": [ + { + "bbox": [ + 119, + 68, + 492, + 182 + ], + "spans": [ + { + "bbox": [ + 119, + 68, + 492, + 182 + ], + "type": "image", + "image_path": "3f8df25e83e327c28fe7fb5b61b7b295d304d82e52ae78b95dd96f11da1379ce.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 116, + 186, + 493, + 200 + ], + "lines": [ + { + "bbox": [ + 116, + 186, + 493, + 200 + ], + "spans": [ + { + "bbox": [ + 116, + 186, + 493, + 200 + ], + "type": "text", + "content": "Figure 2: Inducing programmatic skills and rewriting the trajectory from an episode." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": "possible action trajectories. To have agents more efficiently generate trajectories that test skills in a more targeted way, we curate a rewritten trajectory prefix " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\tau_{D}" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": " to constrain the first few steps executed in the environment, by rewriting and truncating the input action trajectory " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": ", and subsequently asking the agent to complete the prefix to get a full, checkable trajectory " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\tau_{f}" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": ". 
Concretely, we first take the original action trajectory in the input episode " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": " (consisting of primitive actions or previously learned skills), and ask the induction module " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": " to transform it to a skill-using trajectory (Figure 2 bottom right), by replacing sub-trajectories in " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": " with calls to the newly induced skill programs " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": ", if possible. Zooming into the Figure 2 example, this procedure merges click('Marketing') " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": " click('All Reviews') to an open_marketreviews() call; transforms fill(757, 'satisfied') " + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 213, + 506, + 392 + ], + "type": "text", + "content": " click('Search') to a call of the second skill searchreviews('satisfied') with the specified term 'satisfied'; and adopted the last send msg_to_user('2') step directly. Note that we follow Wang et al. 
(2024b) and induce skills according to each website, so some skills could be tailored to particular webpage contexts such as the 'Marketing' and 'All Reviews' link constants in open_marketreviews, while other skills apply to more versatile setups such as searching for different reviews in searchreviews." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 396, + 504, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 504, + 475 + ], + "type": "text", + "content": "Next, to avoid spurious successes in skill verification, we truncate the trajectory yielded above by removing any trailing primitive actions after the last call to a skill program. Taking Figure 2 as an example, in the original input trajectory, the last send msg_to_user('2') already sends over the correct answer '2' to the user. However, if we directly adopt this last step into the skill-using trajectory " + }, + { + "bbox": [ + 104, + 396, + 504, + 475 + ], + "type": "inline_equation", + "content": "\\tau_{D}" + }, + { + "bbox": [ + 104, + 396, + 504, + 475 + ], + "type": "text", + "content": ", then executing it will always return the correct message to the user, regardless of whether the previous skill calls are valid. We thus remove such trailing actions to make sure verification attends to the induced skills we are testing." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": "After rewriting and truncation, we get this skill-using trajectory " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\tau_{D}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " as the prefix to test skills. 
We now query the agent " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " again with the same NL query " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " and first execute " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\tau_{D}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " on the environment. We then allow agents to continue generating up to " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "H_{max} - |\\tau_{D}|" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " actions to finish the task. In the Figure 2 example, to successfully solve query " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": ", we expect the agent to generate another step of send msg_to_user(...) with the correct answer '2' in the message. 
We take the concatenation of the trajectory prefix " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\tau_{D}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " and the later additionally produced new steps " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\tau_{A}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " (e.g., [send msg_to_user('2')]) as the full trajectory " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\tau_{f}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": ". We then decide whether to add the induced programs " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " into the agent skill library as " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": " by examining " + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "inline_equation", + "content": "\\tau_{f}" + }, + { + "bbox": [ + 104, + 479, + 506, + 571 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": "Specifically, we check " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "\\tau_{f}" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": " from three dimensions: (1) Correctness: if executing " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "\\tau_{f}" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": " successfully solves the task " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": " as judged by the neural model evaluator " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "V_{\\mathcal{L}}" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": "; (2) Skill Usage: if the trajectory contains at least one call to at least one new skill in " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": "; and (3) Skill Validity: if all skill-calling actions cause environment changes. 
If all three boxes are checked, we add the skills being called in the trajectory " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "\\tau_{f}" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": " to the agent skill library " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{t} \\cup \\mathcal{D}_{\\text{called}} \\rightarrow \\mathcal{A}_{t+1}" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": ". By adding " + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "inline_equation", + "content": "\\mathcal{D}_{\\text{called}}" + }, + { + "bbox": [ + 104, + 575, + 506, + 647 + ], + "type": "text", + "content": ", the agent can now generate actions that call these skill programs to solve subsequent tasks." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 662, + 334, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 662, + 334, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 662, + 334, + 677 + ], + "type": "text", + "content": "3 General Web Navigation Performance" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 689, + 212, + 700 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 689, + 212, + 700 + ], + "spans": [ + { + "bbox": [ + 105, + 689, + 212, + 700 + ], + "type": "text", + "content": "3.1 Experiment Setup" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 709, + 505, + 734 + ], + "type": "text", + "content": "Benchmark and Evaluation To evaluate ASI on general web navigation scenarios, we adopt the WebArena benchmark (Zhou et al., 2024a) that contains 812 test examples covering" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ 
+ { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 109, + 71, + 500, + 149 + ], + "blocks": [ + { + "bbox": [ + 109, + 71, + 500, + 149 + ], + "lines": [ + { + "bbox": [ + 109, + 71, + 500, + 149 + ], + "spans": [ + { + "bbox": [ + 109, + 71, + 500, + 149 + ], + "type": "table", + "html": "
ModelMethod# StepsSRShopAdminRedditGitLabMapsMulti
GPTVanilla-12.313.910.46.615.015.68.3
AWM5.935.532.129.154.735.042.218.8
ClaudeVanilla5.632.732.636.836.826.138.520.8
AWM5.936.334.839.051.928.939.418.8
ASI (ours)5.040.440.144.054.732.243.120.8
", + "image_path": "4f88725b8dc9876aae1dde5be6e27efcb666ca61c19b6ac75e2549b3303cabd2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "lines": [ + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 104, + 156, + 504, + 179 + ], + "type": "text", + "content": "Table 1: WebArena success rate by adaptive agents with programmatic skills, in comparison to a static vanilla agent baseline, and a text-skill learning adaptive agent." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "text", + "content": "five major web activity domains: e-commerce, social forum, software development, content management, and travel. Each example in WebArena has an NL query " + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "text", + "content": " for the task, and a program-based evaluator that provides a binary " + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "inline_equation", + "content": "0/1" + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "text", + "content": " score for any given trajectory " + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "text", + "content": " to judge if it successfully solves the task " + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 191, + 506, + 258 + ], + "type": "text", + "content": ". 
This program-based evaluator enables relatively rigorous evaluation based on the functional correctness of the action trajectory. We report the average score across all WebArena examples, if not specified otherwise." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "spans": [ + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "text", + "content": "Backbone LM and Agent Architecture We use the top-performing claude-3.5-sonnet model as the LM backbone for all components, including the agent policy " + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "inline_equation", + "content": "\\pi" + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "text", + "content": ", the neural evaluator " + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "text", + "content": ", and the skill induction modules " + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "inline_equation", + "content": "I" + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "text", + "content": ". For experimentation, we use the BrowserGym (Chezelles et al., 2024) framework, which takes the webpage accessibility tree as observation, and instantiates the skill library " + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "inline_equation", + "content": "\\mathcal{A}" + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "text", + "content": " with the WebArena default action space listed in " + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "inline_equation", + "content": "\\S A" + }, + { + "bbox": [ + 104, + 263, + 506, + 320 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 324, + 506, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 324, + 506, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 324, + 506, + 413 + ], + "type": "text", + "content": "Baselines We take the vanilla Claude model with the BrowserGym framework (Drouin et al., 2024) as the non-adaptive agent baseline. Additionally, we compare ASI to AWM (Wang et al., 2024b), the current top-performing online adaptive web agent method. Because AWM was originally developed with the gpt-4o model, for a fairer comparison, we also experiment with AWM with claude-3.5-sonnet model as its LM backbone and also apply the episode cleaning procedure to enhance induction quality. We compare the two baseline methods with our ASI approach. We provide the complete prompts for each agent component: task-solving, episode evaluation, episode cleaning, and skill induction, in SA." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 427, + 228, + 439 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 427, + 228, + 439 + ], + "spans": [ + { + "bbox": [ + 105, + 427, + 228, + 439 + ], + "type": "text", + "content": "3.2 Results and Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "text", + "content": "In Table 1, compared to the vanilla static-agent baseline, adaptive agents (AWM and ASI) generally achieve " + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "inline_equation", + "content": "11.0 - 23.5\\%" + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "text", + "content": " higher success rates overall. 
Among adaptive agents, our ASI with programmatic skills, achieves another " + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "inline_equation", + "content": "11.3\\%" + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "text", + "content": " success rate gain across websites, compared to its AWM counterpart that induces and uses textual skills. Meanwhile, ASI offers additional efficiency benefits by reducing the number of steps in solutions by " + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "inline_equation", + "content": "15.3\\%" + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "inline_equation", + "content": "10.6\\%" + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "text", + "content": " than vanilla and AWM agents, as one skill-call action can often execute multiple steps written in primitive actions used by vanilla and AWM agents. These advantages in correctness and efficiency are exhibited prominently across different websites and tasks, as shown by the website breakdown on Table 1 (right). Refer to " + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "inline_equation", + "content": "\\S B" + }, + { + "bbox": [ + 104, + 448, + 506, + 548 + ], + "type": "text", + "content": " for more analysis." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 562, + 298, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 298, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 298, + 574 + ], + "type": "text", + "content": "3.3 Why are Programmatic Skills Better?" 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 628 + ], + "type": "text", + "content": "To more concretely answer why programmatic skills are more effective than textual skills, we take a closer look on the two main differences between AWM and ASI: [1] whether the induction outcome is verified via execution, and [2] whether the induced skills are provided in memory for reference purpose only, or in the action space that allows execution." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 633, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 633, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 633, + 506, + 733 + ], + "type": "text", + "content": "Better Induction Quality We take the shopping website as a representative, and analyze the textual and program skills induced by AWM and ASI agents. We group textual and program skills by their functionality and show one representative example in Table 2. Compared to the clear functional boundary and highly-reusable granularity of the search_product skill, we find that the textual skills often have (1) more redundant steps, (2) example-specific context: e.g., the last text skill aims to find 'game accessories' while the steps generally apply to any product, and (3) fuzzier boundaries between separable tasks, e.g., the first skill mixes product-search and add-to-wishlist procedures, thus may not offer optimal guidance when asked to, e.g., search product and add it to cart instead." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 70, + 504, + 311 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 504, + 311 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 504, + 311 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 504, + 311 + ], + "type": "table", + "html": "
Programmatic SkillsTextual Skills
def search_product_search_box_id: str, query: str):\n ""Search for a product using the search box.\n Args:\n search_box_id: ID of the search input field\n query: Search query string to enter\n Returns:\n None\nExamples:\n search_product('595', 'sony bluetooth headphones')\n ""\nclick_search_box_id)\nfill_search_box_id, query)\nkeyboard_press('Enter'))Task: Search for a product and add it to wish list\nAction Trajectory:\n __fill(621, {product_name})__ # Enter the product\nname in the search box\n __click(478)__ # Click the search button to execute the\nsearch\n __click({product_link})__ # Click the product to check\nmore details\n __click(1769)__ # Click the "Add to Wish List" link
Task: Search for a product's price range in the store\nAction Trajectory:\n __fill(565, {product_name})__ # Enter the product\nname in the search box\n __click(570)__ # Click the search button to execute the\nsearch\n __noop(1000)__ # Wait for search results to load\n __send msg_to_user({price_range_info})__ # \nAnalyze and report the price range findings from the\nsearch results
Task: Search for gaming accessories within a date range\nAction Trajectory:\n __click(1274)__ # Navigate to the Video Games category\n __fill(473, {search_terms})__ # Enter search terms\nincluding product name and year\n __click(478)__ # Execute the search
", + "image_path": "495de9455d437d5fb7075c1e7ad702e4817af15dbe99fd41fe407351a3037267.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "text", + "content": "Verified Induction Improves End Success Rate From qualitative examination of the induction outcomes, we find roughly similar numbers of episodes evaluated as correct and used for induction (70 and 58 examples for AWM and ASI), ASI produced programs pass verification for only " + }, + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "inline_equation", + "content": "15.6\\%" + }, + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "text", + "content": " of the turns, whereas AWM adds new skills for " + }, + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "inline_equation", + "content": "31.4\\%" + }, + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "text", + "content": " of the time (replace or add none otherwise). While skill usage (in memory or as action, [2]) is designated for AWM and ASI, we hypothesize that verification [1] affects induction quality and thus end success. We thus experiment with another setting that induces programs (such that verification is enabled), and only use the induced skills in memory, to study the importance of induction quality. As shown in Table 3, inducing skills with execution-based verification (i.e., (unverified, text) " + }, + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "inline_equation", + "content": "\\rightarrow" + }, + { + "bbox": [ + 104, + 344, + 506, + 501 + ], + "type": "text", + "content": " (verified, program)), while always present skills in memory, improves end success rate by 4.2 points, indicating the importance of higher-quality induction via verification. 
Yet it is still 3.7 points lower than ASI, suggesting the incompatibility of program format to agent memory. Indeed, we observe many cases where the agent tries to call the skill programs but unsuccessfully, since they are not supported in the action space." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 505, + 297, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 505, + 297, + 614 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 297, + 614 + ], + "type": "text", + "content": "Textual Representations Suit Memory Better To prevent the agent from trying to call these plausible programs, we ablate another setting that transforms program skills to textual format (as Table 2 right) and provide them in agent memory, dubbed (verified, text). This format transformation effectively improves the overall success rate by another 2.6 points, getting a little closer to ASI. Given the different downstream usage," + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 304, + 508, + 503, + 580 + ], + "blocks": [ + { + "bbox": [ + 130, + 316, + 478, + 330 + ], + "lines": [ + { + "bbox": [ + 130, + 316, + 478, + 330 + ], + "spans": [ + { + "bbox": [ + 130, + 316, + 478, + 330 + ], + "type": "text", + "content": "Table 2: Example textual and program skills induced on the shopping website." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 304, + 508, + 503, + 580 + ], + "lines": [ + { + "bbox": [ + 304, + 508, + 503, + 580 + ], + "spans": [ + { + "bbox": [ + 304, + 508, + 503, + 580 + ], + "type": "table", + "html": "
MethodSR
Add to Memoryunverified, text32.6
verified, program36.4
verified, text39.0
Add as Actionsverified, program40.1
", + "image_path": "fb0d1499b20375bbf7fa5c5dc65ac574e4f19e2c831ae86c1eae513719edaffe.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 583, + 506, + 607 + ], + "lines": [ + { + "bbox": [ + 302, + 583, + 506, + 607 + ], + "spans": [ + { + "bbox": [ + 302, + 583, + 506, + 607 + ], + "type": "text", + "content": "Table 3: Ablation study on induction verification and format on the shopping website." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 615, + 506, + 627 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 615, + 506, + 627 + ], + "spans": [ + { + "bbox": [ + 104, + 615, + 506, + 627 + ], + "type": "text", + "content": "i.e., memory or actuation, textual and program formats may suit individual scenarios better." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 631, + 505, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 631, + 505, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 631, + 505, + 654 + ], + "type": "text", + "content": "Beyond basic web navigation tasks, in the next two sections, we examine agents in two other important scenarios, scaled-up activities (§4) and cross-website generalization (§5)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 672, + 294, + 687 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 672, + 294, + 687 + ], + "spans": [ + { + "bbox": [ + 104, + 672, + 294, + 687 + ], + "type": "text", + "content": "4 Scaled-Up Browsing Activities" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 734 + ], + "type": "text", + "content": "The WebArena benchmark mainly features isolated, single-task scenarios, such as adding a single product to the shopping cart. However, in real-world practices, people need to do a series of such tasks together, such as adding multiple related products (e.g., coffee and" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "mug) to the cart before finally checking out. This browsing request can lead to extremely long-horizon tasks, sometimes with repetitive intermediate procedures. 
We identify this to be a scenario to further demonstrate the efficacy of program skills, as opposed to textual skills, as programs lend themselves naturally to repeated invocation and composition." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 506, + 210 + ], + "type": "text", + "content": "Therefore, we curate several case scenarios where the user asks for action-dense instructions, such as the tasks listed in Figure 3. Because the tasks are long-horizon and involve multiple sub-tasks, we follow Xu et al. (2024) and set up intermediate checkpoints to better track the intermediate progress of agents. Refer to SC.1 to see the full list of tasks and their evaluation checkpoints. We measure the success rate of each example by the percentage of checkpoints achieved by the agent. We report the average success rate of all examples, as well as the average number of steps taken to solve the tasks, in Table 4." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 217, + 504, + 275 + ], + "blocks": [ + { + "bbox": [ + 107, + 217, + 504, + 275 + ], + "lines": [ + { + "bbox": [ + 107, + 217, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 107, + 217, + 504, + 275 + ], + "type": "table", + "html": "
MethodShoppingAdminRedditGitLabMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
VANILLA41.723.558.020.833.323.033.340.040.015.2
AWM68.321.574.018.240.016.850.033.865.012.6
ASI (ours)100.016.391.014.255.012.855.025.4100.06.2
", + "image_path": "1148f878af9686e2ea67d104ffeb29c5fefb949586578f2979b3fc083583026f.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "type": "text", + "content": "Table 4: Performance of vanilla, AWM, and ASI agents in scaled-up browsing scenarios. We perform statistical testing between ASI and each baseline and verify all improvements are statistically significant with t-statistics " + }, + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "type": "inline_equation", + "content": "|t| > 2" + }, + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "type": "inline_equation", + "content": "p < 0.05" + }, + { + "bbox": [ + 104, + 281, + 506, + 316 + ], + "type": "text", + "content": "; see SC.3 for more details." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 326, + 505, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 505, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 505, + 372 + ], + "type": "text", + "content": "ASI Features Improved Efficiency Across all websites, ASI-produced trajectories have 6.6-14.6 and " + }, + { + "bbox": [ + 104, + 326, + 505, + 372 + ], + "type": "inline_equation", + "content": "4.0 - 8.4\\%" + }, + { + "bbox": [ + 104, + 326, + 505, + 372 + ], + "type": "text", + "content": " fewer steps, compared to vanilla and AWM baselines, respectively. As the task horizon continues to grow when involving more intermediate checkpoints, this margin between ASI and baselines will predictably be more prominent." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 376, + 504, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 376, + 504, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 376, + 504, + 433 + ], + "type": "text", + "content": "Subsequent Benefits in Success Rate ASI also achieves higher success rates with more efficient trajectories, outperforming vanilla and AWM baselines by " + }, + { + "bbox": [ + 104, + 376, + 504, + 433 + ], + "type": "inline_equation", + "content": "38.9\\%" + }, + { + "bbox": [ + 104, + 376, + 504, + 433 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 376, + 504, + 433 + ], + "type": "inline_equation", + "content": "20.7\\%" + }, + { + "bbox": [ + 104, + 376, + 504, + 433 + ], + "type": "text", + "content": " on average. From manual analysis, we find this improvement comes from easier, better agent planning when using higher-level skills, without the need to tackle more complex procedures if only low-level primitive actions are available, as with vanilla and AWM agents." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 437, + 506, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 506, + 494 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 506, + 494 + ], + "type": "text", + "content": "Case Study: Changing Multiple Addresses We present a representative case on the shopping website: changing billing and shipping addresses after moving. As depicted in the top row in Figure 3, the vanilla agent without adaptive skills often roams into some irrelevant exploration steps, instead of sticking to the optimal route to solve the required task. It runs for minutes and exhausts the maximum steps (i.e., 50) before finishing the task." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 106, + 498, + 506, + 714 + ], + "blocks": [ + { + "bbox": [ + 106, + 498, + 506, + 714 + ], + "lines": [ + { + "bbox": [ + 106, + 498, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 106, + 498, + 506, + 714 + ], + "type": "image", + "image_path": "810aadc8162de8b670c598520349aa2a623d11538656c116f1a472d096254632.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 111, + 719, + 499, + 734 + ], + "lines": [ + { + "bbox": [ + 111, + 719, + 499, + 734 + ], + "spans": [ + { + "bbox": [ + 111, + 719, + 499, + 734 + ], + "type": "text", + "content": "Figure 3: Example scaled-up task of updating multiple addresses on shopping website." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 128 + ], + "type": "text", + "content": "With AWM, adding textual skills in memory provides soft guidelines for agents to follow, the agent thus better sticks to the goal and finishes each part of the task (i.e., navigate to the address page, update 
billing/shipping address) step by step. Although successful, the trajectory is long, i.e., 27 steps, and still takes a few minutes to finish." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "type": "text", + "content": "In comparison, ASI (in Figure 3 bottom row) showcases its efficiency by using learned skills to navigate_to_address_settings and update_address_details can solve each part in one step (vs. the 3-6 steps used by AWM for these parts). Overall, ASI correctly finishes all required actions in only 4 steps, shortening the horizon by " + }, + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "type": "inline_equation", + "content": "85.2\\%" + }, + { + "bbox": [ + 104, + 132, + 504, + 178 + ], + "type": "text", + "content": " compared to AWM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 195, + 270, + 211 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 195, + 270, + 211 + ], + "spans": [ + { + "bbox": [ + 105, + 195, + 270, + 211 + ], + "type": "text", + "content": "5 Adapting Across Websites" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 222, + 258, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 222, + 258, + 310 + ], + "spans": [ + { + "bbox": [ + 104, + 222, + 258, + 310 + ], + "type": "text", + "content": "To examine whether agents can generalize with learned skills, we test agents on real-world website counterparts for some of the domains in WebArena as listed in Table 5. 
This experiment setup can reflect on (1) transfer across different websites of the same domain," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 310, + 402, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 310, + 402, + 321 + ], + "spans": [ + { + "bbox": [ + 105, + 310, + 402, + 321 + ], + "type": "text", + "content": "and (2) transfer from simulated, sandboxed to real-world websites." + } + ] + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 266, + 224, + 500, + 277 + ], + "blocks": [ + { + "bbox": [ + 266, + 224, + 500, + 277 + ], + "lines": [ + { + "bbox": [ + 266, + 224, + 500, + 277 + ], + "spans": [ + { + "bbox": [ + 266, + 224, + 500, + 277 + ], + "type": "table", + "html": "
DomainWebArena SandboxedReal-World
shoppingOneStopMarketTarget
online forumPostMillReddit
travelOpenStreetMapGoogle Maps
", + "image_path": "b55e238f84562ee1941db044ca264e0042cc5add90554d1fc6dd8b5509f25b8a.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 263, + 279, + 504, + 301 + ], + "lines": [ + { + "bbox": [ + 263, + 279, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 263, + 279, + 504, + 301 + ], + "type": "text", + "content": "Table 5: Real-world in-domain website counterparts to each WebArena sandboxed website." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 326, + 506, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 326, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 104, + 326, + 506, + 384 + ], + "type": "text", + "content": "For each sandbox-real website pair, we take ten information-seeking style queries (He et al., 2024) in WebArena that do not involve potential privacy leakage or unrecoverable risky actions, such as making a purchase or changing user password. We provide the task details in §C.2. We compare ASI and AWM with their programmatic and textual skills as learned in §3, as well as comparing to the vanilla static agent baseline." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 388, + 237, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 388, + 237, + 455 + ], + "spans": [ + { + "bbox": [ + 105, + 388, + 237, + 455 + ], + "type": "text", + "content": "Transferring Common Skills In Figure 4, we can see how ASI can effectively reuse common skills such as search_product in the first step on the Target website." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 459, + 239, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 459, + 239, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 459, + 239, + 559 + ], + "type": "text", + "content": "Incompatible Skills One challenge faced by ASI is that some prior skills become incompatible on the new website. For example, the sort_by Listings() induced on OneStopMarket selects options from a dropdown menu, yet sorting on" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 559, + 504, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 559, + 504, + 593 + ], + "spans": [ + { + "bbox": [ + 104, + 559, + 504, + 593 + ], + "type": "text", + "content": "the Target website opens a sidebar; despite their semantic similarity, the concrete actions in skill programs are no longer applicable. Still, we find that agents can often spot this incompatibility and rarely attempt to use these deprecated skills." + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 244, + 392, + 500, + 523 + ], + "blocks": [ + { + "bbox": [ + 244, + 392, + 500, + 523 + ], + "lines": [ + { + "bbox": [ + 244, + 392, + 500, + 523 + ], + "spans": [ + { + "bbox": [ + 244, + 392, + 500, + 523 + ], + "type": "image", + "image_path": "32b98aeaec3843cbaad2caf4552567bbfe930405b8409da2257233ba69eff45a.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 243, + 529, + 504, + 553 + ], + "lines": [ + { + "bbox": [ + 243, + 529, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 243, + 529, + 504, + 553 + ], + "type": "text", + "content": "Figure 4: ASI can generalize the search_product skill but face incompatibility when sorting items." 
+ } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 597, + 504, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 504, + 609 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 504, + 609 + ], + "type": "text", + "content": "Adapting Skills to New Environment Although some skills induced on previous websites" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 609, + 226, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 226, + 708 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 226, + 708 + ], + "type": "text", + "content": "cannot be directly used on arbitrary new websites, we hypothesize that these skills can still serve as informative references on solving procedurally similar tasks or composing new skills targeted for the new website design." + } + ] + } + ], + "index": 15 + }, + { + "type": "table", + "bbox": [ + 235, + 612, + 500, + 689 + ], + "blocks": [ + { + "bbox": [ + 235, + 612, + 500, + 689 + ], + "lines": [ + { + "bbox": [ + 235, + 612, + 500, + 689 + ], + "spans": [ + { + "bbox": [ + 235, + 612, + 500, + 689 + ], + "type": "table", + "html": "
MethodShoppingRedditMap
sr ↑# steps ↓sr ↑# steps ↓sr ↑# steps ↓
Vanilla80.05.440.04.863.37.4
AWM80.05.056.74.8100.06.2
ASI90.03.476.74.493.34.4
AWM + update80.05.463.35.8100.07.2
ASI + update90.03.276.74.093.34.2
", + "image_path": "32d2941bc49fc2c302ec4d75a44393a68d9505c660e98d27b329443e04fd912a.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 231, + 693, + 506, + 715 + ], + "lines": [ + { + "bbox": [ + 231, + 693, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 231, + 693, + 506, + 715 + ], + "type": "text", + "content": "Table 6: Cross-website results. ASI significantly surpasses baselines in sr and # steps (with " + }, + { + "bbox": [ + 231, + 693, + 506, + 715 + ], + "type": "inline_equation", + "content": "|t| > 2" + }, + { + "bbox": [ + 231, + 693, + 506, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 231, + 693, + 506, + 715 + ], + "type": "inline_equation", + "content": "p < 0.05" + }, + { + "bbox": [ + 231, + 693, + 506, + 715 + ], + "type": "text", + "content": ") from our analysis in §C.3." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 504, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 504, + 732 + ], + "type": "text", + "content": "2We did not test on administrative and software websites given their more severe safety concerns." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 507, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 507, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 507, + 140 + ], + "type": "text", + "content": "We thus allow agents to induce new skills or update previously acquired skills from experiences on the new website, denoted as +update entries in Table 6. We find that enabling skill update in both textual and program formats helps agent performance on new websites. Within the short online learning process (tens of examples), AWM adapts faster to the new websites, while ASI sees a more pronounced improvement in efficiency." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 156, + 201, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 201, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 201, + 168 + ], + "type": "text", + "content": "6 Related Work" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 182, + 506, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 338 + ], + "type": "text", + "content": "Adaptive Digital Agents An important thread of agent-improving methods is to build adaptive agents that can autonomously self-improve from experiences. 
Most works focus on integrating past experiences into agent memory by collecting human annotation (Deng et al., 2024) or LM-based synthesis (Ou et al., 2024; Xu et al., 2025), especially via agent-driven exploration with instruction- (Murty et al., 2024b) or trajectory-driven (Murty et al., 2024a) approaches, offering warm starts on the websites of interest. Other works gather experiences (Wang et al., 2024b) or feedback (Qu et al., 2024) during test time, and augment them into memory through parametric channels such as supervised fine-tuning (Murty et al., 2024a), contrastive learning (Song et al., 2024), or reinforcement learning (Zhou et al., 2024b). Meanwhile, non-parametric approaches can directly augment reflections (Shinn et al., 2023), raw past experiences (Wang et al., 2023; Zheng et al., 2023), or further induced reusable workflows (Wang et al., 2024b). While these adaptive agents learn textual skills stored in memory, our ASI stores skills as verifiable and composable programs in the agent action space (i.e., skill library), thus enabling better quality and efficiency." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 341, + 507, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 341, + 507, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 341, + 507, + 485 + ], + "type": "text", + "content": "Skill Discovery and Learning Learning specialized skills for tasks in programmatic (Shin et al., 2019; Ellis et al., 2023; Cai et al., 2024; Wang et al., 2024a; Grand et al., 2024), embodied (Sharma et al., 2022; Wang et al., 2023; Liang et al., 2023; Sarch et al., 2024; Wong et al., 2024), and physical (Yu et al., 2023) environments has shown to success in agent performance. 
Particularly for digital agents built for web navigation tasks, most works focus on exploring skills offline with RL roll-outs (Gur et al., 2018; Liu et al., 2018; Putta et al., 2024; Qi et al., 2024) or LM-based prompting (Zhou et al., 2024b; Murty et al., 2024a; Patel et al., 2024). While this exploration stage could offer some supervised data to update the agent policy either parametric (Murty et al., 2024a; Patel et al., 2024) or non-parametrically (Zheng et al., 2023; Murty et al., 2024b), it often costs enormous extra computation and may suffer from the lack or mismatch in distribution with the downstream tasks at hand (Wang et al., 2024b). In contrast, our ASI does not rely on supervised data and can directly learn skills online without prior exploration." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 490, + 507, + 578 + ], + "type": "text", + "content": "Web Navigation Benchmarks Digital agents have been explored across a wide range of tasks (Yao et al., 2024; Kapoor et al., 2025; Xie et al., 2024b), among which one of the most popular application being browsing and navigating through versatile websites such as shopping (Yao et al., 2022), social media communication (Zhou et al., 2024a; Koh et al., 2024), knowledge work tasks (Drouin et al., 2024), and more (Deng et al., 2024). Our work focuses on general web navigation tasks using the WebArena (Zhou et al., 2024a) benchmark, meanwhile exploring other challenging scenarios such as scaled-up activities (Yoran et al., 2024) and cross-domain generalization (Deng et al., 2024)." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 596, + 321, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 321, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 321, + 609 + ], + "type": "text", + "content": "7 Conclusion and Future Discussions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 621, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 621, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 621, + 507, + 734 + ], + "type": "text", + "content": "In this work, we present ASI to support web navigation agents to autonomously induce, verify, learn, and apply programmatic skills during online inference. Beyond achieving " + }, + { + "bbox": [ + 104, + 621, + 507, + 734 + ], + "type": "inline_equation", + "content": "23.5\\%" + }, + { + "bbox": [ + 104, + 621, + 507, + 734 + ], + "type": "text", + "content": " success rate and " + }, + { + "bbox": [ + 104, + 621, + 507, + 734 + ], + "type": "inline_equation", + "content": "15.3\\%" + }, + { + "bbox": [ + 104, + 621, + 507, + 734 + ], + "type": "text", + "content": " efficiency increases in general web tasks, we also showcase ASI's strengths for scaled-up web activities, thanks to the high-level action interface offered by the programmatic abstraction. Moreover, we examine skill generalizability to new, real-world websites, and find ASI still offers great efficiency while flexibly updating skills to new environments. While our work aims to offer insights on the optimal representation in agent skill acquisition, we still find multiple pieces in ASI worthy of further investigation, such as the conceptually or empirically suitable granularity of skills, the stability of the online evolving process, and the skill quality in comparison to human expert desiderata." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 212, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 212, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 212, + 95 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 152 + ], + "type": "text", + "content": "We would like to thank Jiayuan Mao, Yueqi Song, Boyuan Zheng, and Yu Su for the insightful discussions. We thank Yiqing Xie, Xinran Zhao, and Mingqian Zheng for their helpful comments on the paper draft. Zora is supported by the CMU Presidential Fellowship and Fujitsu Research. Apurva is supported by Amazon." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 167, + 168, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 167, + 168, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 168, + 180 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 187, + 507, + 733 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 106, + 187, + 506, + 300 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 187, + 506, + 300 + ], + "spans": [ + { + "bbox": [ + 106, + 187, + 506, + 300 + ], + "type": "text", + "content": "Tom Brown, Benjamin Mann, Nick Ryder, Melanie Subbiah, Jared D Kaplan, Prafulla Dhariwal, Arvind Neelakantan, Pranav Shyam, Girish Sastry, Amanda Askell, Sandhini Agarwal, Ariel Herbert-Voss, Gretchen Krueger, Tom Henighan, Rewon Child, Aditya Ramesh, Daniel Ziegler, Jeffrey Wu, Clemens Winter, Chris Hesse, Mark Chen, Eric Sigler, Mateusz Litwin, Scott Gray, Benjamin Chess, Jack Clark, Christopher Berner, Sam McCandlish, Alec Radford, Ilya Sutskever, and Dario Amodei. Language models are few-shot learners. In H. Larochelle, M. Ranzato, R. Hadsell, M.F. Balcan, and H. Lin (eds.), Advances in Neural Information Processing Systems, volume 33, pp. 1877-1901. Curran Associates, Inc., 2020. URL https://proceedings.neurips.cc/paper_files/paper/2020/file/1457c0d6bfcb4967418bf8ac142f64a-Paper.pdf." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 306, + 507, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 306, + 507, + 342 + ], + "spans": [ + { + "bbox": [ + 106, + 306, + 507, + 342 + ], + "type": "text", + "content": "Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=qV83K9d5WB." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 348, + 506, + 384 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 348, + 506, + 384 + ], + "spans": [ + { + "bbox": [ + 105, + 348, + 506, + 384 + ], + "type": "text", + "content": "De Chezelles, Thibault Le Sellier, Maxime Gasse, Alexandre Lacoste, Alexandre Drouin, Massimo Caccia, Léo Boisvert, Megh Thakkar, Tom Marty, Rim Assouel, et al. The browsergym ecosystem for web agent research. arXiv preprint arXiv:2412.05467, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 390, + 505, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 505, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 505, + 426 + ], + "type": "text", + "content": "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Sam Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. Advances in Neural Information Processing Systems, 36, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 433, + 506, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 433, + 506, + 479 + ], + "spans": [ + { + "bbox": [ + 106, + 433, + 506, + 479 + ], + "type": "text", + "content": "Alexandre Drouin, Maxime Gasse, Massimo Caccia, Issam H Laradji, Manuel Del Verme, Tom Marty, Léo Boisvert, Megh Thakkar, Quentin Cappart, David Vazquez, et al. Workarena: How capable are web agents at solving common knowledge work tasks? arXiv preprint arXiv:2403.07718, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 485, + 506, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 485, + 506, + 532 + ], + "spans": [ + { + "bbox": [ + 106, + 485, + 506, + 532 + ], + "type": "text", + "content": "Kevin Ellis, Lionel Wong, Maxwell Nye, Mathias Sable-Meyer, Luc Cary, Lore Anaya Pozo, Luke Hewitt, Armando Solar-Lezama, and Joshua B Tenenbaum. 
Dreamcoder: growing generalizable, interpretable knowledge with wake-sleep bayesian program learning. Philosophical Transactions of the Royal Society A, 381(2251):20220050, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 538, + 506, + 585 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 538, + 506, + 585 + ], + "spans": [ + { + "bbox": [ + 106, + 538, + 506, + 585 + ], + "type": "text", + "content": "Yao Fu, Dong-Ki Kim, Jaekyeom Kim, Sungryull Sohn, Lajanugen Logeswaran, Kyunghoon Bae, and Honglak Lee. Autoguide: Automated generation and selection of state-aware guidelines for large language model agents. CoRR, abs/2403.08978, 2024. URL https://doi.org/10.48550/arXiv.2403.08978." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 591, + 506, + 638 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 591, + 506, + 638 + ], + "spans": [ + { + "bbox": [ + 106, + 591, + 506, + 638 + ], + "type": "text", + "content": "Gabriel Grand, Lionel Wong, Matthew Bowers, Theo X. Olausson, Muxin Liu, Joshua B. Tenenbaum, and Jacob Andreas. LILO: Learning interpretable libraries by compressing and documenting code. In The Twelfth International Conference on Learning Representations, 2024. URL https://openreview.net/forum?id=TqYbAWKMJe." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 644, + 504, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 644, + 504, + 669 + ], + "spans": [ + { + "bbox": [ + 106, + 644, + 504, + 669 + ], + "type": "text", + "content": "Izzeddin Gur, Ulrich Rückert, Aleksandra Faust, and Dilek Hakkani-Tür. Learning to navigate the web. CoRR, abs/1812.09195, 2018. URL http://arxiv.org/abs/1812.09195." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 675, + 505, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 675, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 106, + 675, + 505, + 733 + ], + "type": "text", + "content": "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, 2024. URL https://aclanthology.org/2024.acl-long.371/." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 505, + 128 + ], + "type": "text", + "content": "Raghav Kapoor, Yash Parag Butala, Melisa Russak, Jing Yu Koh, Kiran Kamble, Waseem AlShikh, and Ruslan Salakhutdinov. 
Omniact: A dataset and benchmark for enabling multimodal generalist autonomous agents for desktop and web. In European Conference on Computer Vision, pp. 161-178. Springer, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 506, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 506, + 179 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 179 + ], + "type": "text", + "content": "Jing Yu Koh, Robert Lo, Lawrence Jang, Vikram Duvvur, Ming Chong Lim, Po-Yu Huang, Graham Neubig, Shuyan Zhou, Ruslan Salakhutdinov, and Daniel Fried. Visualwebarena: Evaluating multimodal agents on realistic visual web tasks. arXiv preprint arXiv:2401.13649, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 186, + 506, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 506, + 220 + ], + "type": "text", + "content": "Sergey Levine, Aviral Kumar, George Tucker, and Justin Fu. Offline reinforcement learning: Tutorial, review, and perspectives on open problems. arXiv preprint arXiv:2005.01643, 2020." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 228, + 506, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 228, + 506, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 228, + 506, + 272 + ], + "type": "text", + "content": "Jacky Liang, Wenlong Huang, Fei Xia, Peng Xu, Karol Hausman, Brian Ichter, Pete Florence, and Andy Zeng. Code as policies: Language model programs for embodied control. In 2023 IEEE International Conference on Robotics and Automation (ICRA), pp. 9493-9500. IEEE, 2023." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 280, + 504, + 316 + ], + "type": "text", + "content": "Evan Zheran Liu, Kelvin Guu, Panupong Pasupat, and Percy Liang. Reinforcement learning on web interfaces using workflow-guided exploration. In International Conference on Learning Representations, 2018. URL https://openreview.net/forum?id=ryTp3f-0-." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 322, + 506, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 322, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 506, + 367 + ], + "type": "text", + "content": "Bodhisattwa Prasad Majumder, Bhavana Dalvi, Peter Jansen, Oyvind Tafjord, Niket Tandon, Li Zhang, Chris Callison-Burch, and Peter Clark. CLIN: A continually learning language agent for rapid task adaptation and generalization, 2024. URL https://openreview.net/forum?id=d5DGVHMsC." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 374, + 506, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 506, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 506, + 409 + ], + "type": "text", + "content": "Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator, 2024a. URL https://arxiv.org/abs/2410.02907." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 415, + 506, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 415, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 105, + 415, + 506, + 451 + ], + "type": "text", + "content": "Shikhar Murty, Christopher Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. Bagel: Bootstrapping agents by guiding exploration with language, 2024b. 
URL https://arxiv.org/abs/2403.08140." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 457, + 506, + 514 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 506, + 514 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 506, + 514 + ], + "type": "text", + "content": "Tianyue Ou, Frank F. Xu, Aman Madaan, Jiarui Liu, Robert Lo, Abishek Sridhar, Sudipta Sengupta, Dan Roth, Graham Neubig, and Shuyan Zhou. Synatra: Turning indirect knowledge into direct demonstrations for digital agents at scale. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. URL https://openreview.net/forum?id=KjNEzWRIqn." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 521, + 506, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 506, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 506, + 567 + ], + "type": "text", + "content": "Yichen Pan, Dehan Kong, Sida Zhou, Cheng Cui, Yifei Leng, Bing Jiang, Hangyu Liu, Yanyi Shang, Shuyan Zhou, Tongshuang Wu, and Zhengyang Wu. Webcanvas: Benchmarking web agents in online environments. In Agentic Markets Workshop at ICML 2024, 2024. URL https://openreview.net/forum?id=O1FaGasJob." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 573, + 506, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 573, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 506, + 608 + ], + "type": "text", + "content": "Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. arXiv preprint arXiv:2405.20309, 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 614, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 614, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 614, + 506, + 649 + ], + "type": "text", + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent q: Advanced reasoning and learning for autonomous ai agents. arXiv preprint arXiv:2408.07199, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 655, + 504, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 504, + 691 + ], + "type": "text", + "content": "Zehan Qi, Xiao Liu, Iat Long Iong, Hanyu Lai, Xueqiao Sun, Wenyi Zhao, Yu Yang, Xinyue Yang, Jiadai Sun, Shuntian Yao, et al. Webrl: Training llm web agents via self-evolving online curriculum reinforcement learning. arXiv preprint arXiv:2411.02337, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 697, + 506, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 506, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 506, + 731 + ], + "type": "text", + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive introspection: Teaching language model agents how to self-improve, 2024. URL https://arxiv.org/abs/2407.18219." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 127 + ], + "type": "text", + "content": "Gabriel Sarch, Lawrence Jang, Michael Tarr, William W Cohen, Kenneth Marino, and Katerina Fragkiadaki. Vlm agents generate their own memories: Distilling experience into embodied programs of thought. Advances in Neural Information Processing Systems, 37: 75942-75985, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 133, + 505, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 133, + 505, + 158 + ], + "spans": [ + { + "bbox": [ + 107, + 133, + 505, + 158 + ], + "type": "text", + "content": "Amrith Setlur, Nived Rajaraman, Sergey Levine, and Aviral Kumar. Scaling test-time compute without verification or rl is suboptimal. arXiv preprint arXiv:2502.12118, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 163, + 506, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 506, + 209 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 506, + 209 + ], + "type": "text", + "content": "Pratyusha Sharma, Antonio Torralba, and Jacob Andreas. Skill induction and planning with latent language. In Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers). Association for Computational Linguistics, May 2022. URL https://aclanthology.org/2022.acl-long.120/." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 215, + 504, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 215, + 504, + 250 + ], + "spans": [ + { + "bbox": [ + 107, + 215, + 504, + 250 + ], + "type": "text", + "content": "Eui Chul Shin, Miltiadis Allamanis, Marc Brockschmidt, and Alex Polozov. Program synthesis and semantic parsing with learned code idioms. Advances in Neural Information Processing Systems, 32, 2019." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 256, + 506, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 256, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 105, + 256, + 506, + 291 + ], + "type": "text", + "content": "Noah Shinn, Federico Cassano, Edward Berman, Ashwin Gopinath, Karthik Narasimhan, and Shunyu Yao. Reflexion: Language agents with verbal reinforcement learning, 2023. URL https://arxiv.org/abs/2303.11366." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 297, + 506, + 331 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 297, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 107, + 297, + 506, + 331 + ], + "type": "text", + "content": "Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for llm agents, 2024. 
URL https://arxiv.org/abs/2403.02502." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 338, + 504, + 373 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 338, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 107, + 338, + 504, + 373 + ], + "type": "text", + "content": "Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi Fan, and Anima Anandkumar. Voyager: An open-ended embodied agent with large language models, 2023. URL https://arxiv.org/abs/2305.16291." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 378, + 504, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 378, + 504, + 414 + ], + "spans": [ + { + "bbox": [ + 107, + 378, + 504, + 414 + ], + "type": "text", + "content": "Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=DCNCwamJjI." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 419, + 504, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 419, + 504, + 443 + ], + "spans": [ + { + "bbox": [ + 107, + 419, + 504, + 443 + ], + "type": "text", + "content": "Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024b." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 449, + 506, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 449, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 107, + 449, + 506, + 495 + ], + "type": "text", + "content": "Lionel Wong, Jiayuan Mao, Pratyusha Sharma, Zachary S Siegel, Jiahai Feng, Noa Korneev, Joshua B. Tenenbaum, and Jacob Andreas. Learning grounded action abstractions from language. In The Twelfth International Conference on Learning Representations, 2024. 
URL https://openreview.net/forum?id=qJ0Cfj4Ex9." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 501, + 506, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 501, + 506, + 547 + ], + "spans": [ + { + "bbox": [ + 107, + 501, + 506, + 547 + ], + "type": "text", + "content": "Jian Xie, Kai Zhang, Jiangjie Chen, Tinghui Zhu, Renze Lou, Yuandong Tian, Yanghua Xiao, and Yu Su. Travelplanner: A benchmark for real-world planning with language agents. In *Forty-first International Conference on Machine Learning*, 2024a. URL https://openreview.net/forum?id=15XQzNkAOe." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 553, + 506, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 553, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 107, + 553, + 506, + 597 + ], + "type": "text", + "content": "Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, et al. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. arXiv preprint arXiv:2404.07972, 2024b." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 605, + 504, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 605, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 107, + 605, + 504, + 640 + ], + "type": "text", + "content": "Frank F Xu, Yufan Song, Boxuan Li, Yuxuan Tang, Kritanjali Jain, Mengxue Bao, Zora Z Wang, Xuhui Zhou, Zhitong Guo, Murong Cao, et al. Theagentcompany: benchmarking llm agents on consequential real world tasks. arXiv preprint arXiv:2412.14161, 2024." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 646, + 504, + 692 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 646, + 504, + 692 + ], + "spans": [ + { + "bbox": [ + 107, + 646, + 504, + 692 + ], + "type": "text", + "content": "Yiheng Xu, Dunjie Lu, Zhennan Shen, Junli Wang, Zekun Wang, Yuchen Mao, Caiming Xiong, and Tao Yu. Agenttrek: Agent trajectory synthesis via guiding replay with web tutorials. In The Thirteenth International Conference on Learning Representations, 2025. URL https://openreview.net/forum?id=EEgYUccwsV." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 107, + 697, + 504, + 732 + ], + "type": "text", + "content": "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. Advances in Neural Information Processing Systems, 35:20744-20757, 2022." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 376 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "type": "text", + "content": "Shunyu Yao, Noah Shinn, Pedram Razavi, and Karthik Narasimhan. " + }, + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 105, + 81, + 507, + 116 + ], + "type": "text", + "content": "-bench: A benchmark for tool-agent-user interaction in real-world domains. arXiv preprint arXiv:2406.12045, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 505, + 157 + ], + "type": "text", + "content": "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Bogin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? arXiv preprint arXiv:2407.15711, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 163, + 507, + 232 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 163, + 507, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 163, + 507, + 232 + ], + "type": "text", + "content": "Wenhao Yu, Nimrod Gileadi, Chuyuan Fu, Sean Kirmani, Kuang-Huei Lee, Montserrat Gonzalez Arenas, Hao-Tien Lewis Chiang, Tom Erez, Leonard Hasenclever, Jan Humplik, brian richter, Ted Xiao, Peng Xu, Andy Zeng, Tingnan Zhang, Nicolas Heess, Dorsa Sadigh, Jie Tan, Yuval Tassa, and Fei Xia. Language to rewards for robotic skill synthesis. In 7th Annual Conference on Robot Learning, 2023. URL https://openreview.net/forum?id= SgTPdyehXMA." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 237, + 505, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 237, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 505, + 272 + ], + "type": "text", + "content": "Longtao Zheng, Rundong Wang, Xinrun Wang, and Bo An. Synapse: Trajectory-as-exemplar prompting with memory for computer control. In The Twelfth International Conference on Learning Representations, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 277, + 507, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 277, + 507, + 335 + ], + "spans": [ + { + "bbox": [ + 105, + 277, + 507, + 335 + ], + "type": "text", + "content": "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 341, + 507, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 341, + 507, + 376 + ], + "spans": [ + { + "bbox": [ + 105, + 341, + 507, + 376 + ], + "type": "text", + "content": "Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Erran Li. Proposer-agent-evaluator (pae): Autonomous skill discovery for foundation model internet agents. arXiv preprint arXiv:2412.13194, 2024b." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 80, + 236, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 80, + 236, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 80, + 236, + 95 + ], + "type": "text", + "content": "A Experiment Details" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 108, + 224, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 108, + 224, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 108, + 224, + 121 + ], + "type": "text", + "content": "A.1 Agent Action Space" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 146, + 172, + 462, + 357 + ], + "blocks": [ + { + "bbox": [ + 104, + 130, + 506, + 165 + ], + 
"lines": [ + { + "bbox": [ + 104, + 130, + 506, + 165 + ], + "spans": [ + { + "bbox": [ + 104, + 130, + 506, + 165 + ], + "type": "text", + "content": "Table 7 shows the default action space the web navigation agents we employed in all the experiments. This action space remains the same for both (i) static, vanilla agent, as well as the (ii) adaptive agent that learn textual skills in memory, i.e., AWM." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 146, + 172, + 462, + 357 + ], + "lines": [ + { + "bbox": [ + 146, + 172, + 462, + 357 + ], + "spans": [ + { + "bbox": [ + 146, + 172, + 462, + 357 + ], + "type": "table", + "html": "
Action TypeDescription
noopwait_ms)Do nothing for specified time.
click(elem)Click at an element.
hover(elem)Hover on an element.
fill(elem, value)Type into an element.
keyboard_press(key_comb)Press a key combination.
scroll(x, y)Scroll horizontally or vertically.
select_option(elem, options)Select one or multiple options.
goto(url)Navigate to a url.
go_back()Navigate to the previous page.
go_forward()Navigate to the next page.
new_tab()Open a new tab.
tab_close()Close the current tab.
tabfocus(index)Bring tab to front.
send msg_to_user(text)Send a message to the user.
report_infeasible Reason)Notify user that instructions are infeasible.
", + "image_path": "1aaa066e75e3a385f9f9d19b3acdea81acd51a20e825389f3c2d58675f3d0989.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 155, + 360, + 455, + 373 + ], + "lines": [ + { + "bbox": [ + 155, + 360, + 455, + 373 + ], + "spans": [ + { + "bbox": [ + 155, + 360, + 455, + 373 + ], + "type": "text", + "content": "Table 7: Initial action space for agents throughout our experiments." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 398, + 304, + 411 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 398, + 304, + 411 + ], + "spans": [ + { + "bbox": [ + 105, + 398, + 304, + 411 + ], + "type": "text", + "content": "A.2 Prompts for LLM-Based Components" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 424, + 264, + 437 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 424, + 264, + 437 + ], + "spans": [ + { + "bbox": [ + 121, + 424, + 264, + 437 + ], + "type": "text", + "content": "Prompt for Episode Evaluation" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 119, + 447, + 491, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 447, + 491, + 514 + ], + "spans": [ + { + "bbox": [ + 119, + 447, + 491, + 514 + ], + "type": "text", + "content": "You are an expert in evaluating the performance of a web navigation agent. The agent is designed to help a human user navigate a website to complete a task. Given the user's intent, the agent's action history, the final state of the webpage, and the agent's response to the user, your goal is to decide whether the agent's execution is successful or not. Please be careful of each detail and strict about the evaluation process." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 119, + 513, + 492, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 513, + 492, + 678 + ], + "spans": [ + { + "bbox": [ + 119, + 513, + 492, + 678 + ], + "type": "text", + "content": "There are three types of tasks: 1. Information seeking: The user wants to obtain certain information from the webpage, such as the information of a product, reviews, map info, comparison of map routes, etc. The bot's response must contain the information the user wants, or explicitly state that the information is not available. Otherwise, e.g. the bot encounters an exception and respond with the error content, the task is considered a failure. Besides, be careful about the sufficiency of the agent's actions. For example, when asked to list the top-searched items in a shop, the agent should order the items by the number of searches, and then return the top items. If the ordering action is missing, the task is likely to fail. 2. Site navigation: The user wants to navigate to a specific page. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response. 3. Content modification: The user wants to modify the content of a webpage or configuration. Carefully examine the bot's action history and the final state of the webpage to determine whether the bot successfully completes the task. No need to consider the bot's response." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 120, + 677, + 490, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 677, + 490, + 700 + ], + "spans": [ + { + "bbox": [ + 120, + 677, + 490, + 700 + ], + "type": "text", + "content": "*IMPORTANT* Please be strict about the evaluation process. 
Format your response into two lines as shown below:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 700, + 350, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 700, + 350, + 712 + ], + "spans": [ + { + "bbox": [ + 120, + 700, + 350, + 712 + ], + "type": "text", + "content": "Thoughts: " + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 120, + 711, + 252, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 711, + 252, + 721 + ], + "spans": [ + { + "bbox": [ + 120, + 711, + 252, + 721 + ], + "type": "text", + "content": "Status: \"success\" or \"failure\"" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 83, + 257, + 96 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 83, + 257, + 96 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 257, + 96 + ], + "type": "text", + "content": "Prompt for Episode Cleaning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 120, + 106, + 489, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 106, + 489, + 140 + ], + "spans": [ + { + "bbox": [ + 120, + 106, + 489, + 140 + ], + "type": "text", + "content": "You are a helpful assistant in summarizing web browsing actions. 
Your task is to summarize the main action taken and the corresponding state change, from the input thought process and environmental description." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 140, + 490, + 249 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 140, + 490, + 249 + ], + "spans": [ + { + "bbox": [ + 120, + 140, + 490, + 249 + ], + "type": "text", + "content": "Example input: \"Let me think about this step by step: 1) I've clicked the Columns button and based on the accessibility tree, I can now see a list of available columns that can be shown/hidden. 2) Looking through the available columns in the accessibility tree, I don't see any direct column for \"Number of Orders\" or similar metric. 3) Since I don't see the information I need in the available columns, this suggests that: - Either this information is available through a different path - Or we need to access the Sales/Orders section to correlate this information 4) The most logical path would be to: - Go to the Sales section where we can see all orders - And then analyze which customers have the most orders 5) From the accessibility tree, I can see the \"SALES\" link with bid \"183\" in the main navigation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 120, + 249, + 488, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 249, + 488, + 270 + ], + "spans": [ + { + "bbox": [ + 120, + 249, + 488, + 270 + ], + "type": "text", + "content": "Let me click on the SALES link: This should open the Sales menu where we can access the Orders section to analyze customer order histories.\"" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 120, + 270, + 489, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 270, + 489, + 293 + ], + "spans": [ + { + "bbox": [ + 120, + 270, + 489, + 293 + ], + "type": "text", + "content": "Example output: \"Clicked the SALES link to access the Orders section for customer order histories.\"" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 310, + 246, + 321 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 310, + 246, + 321 + ], + "spans": [ + { + "bbox": [ + 121, + 310, + 246, + 321 + ], + "type": "text", + "content": "Prompt for Skill Induction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 120, + 332, + 489, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 332, + 489, + 366 + ], + "spans": [ + { + "bbox": [ + 120, + 332, + 489, + 366 + ], + "type": "text", + "content": "You are a proficient software engineer. Your task is to (1) summarize reusable functions as APIs from the provided action trajectories, and (2) rewrite the trajectories using the reusable functions you generated in (1)." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 366, + 490, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 366, + 490, + 442 + ], + "spans": [ + { + "bbox": [ + 120, + 366, + 490, + 442 + ], + "type": "text", + "content": "For (1), from the provided examples about the same task, you job is to generate Python functions that can be reused to solve (part of) these tasks. 
The functions should have mediocre complexity: (i) containing at least three actions and not too simple (e.g., a single line of code), (ii) not too complex (e.g., more than 10 lines of code), and should be general enough to be applied to other similar tasks. The arguments to these functions should be common variables (such as strings and lists), avoid using complex inputs such as another function." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 120, + 442, + 490, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 442, + 490, + 485 + ], + "spans": [ + { + "bbox": [ + 120, + 442, + 490, + 485 + ], + "type": "text", + "content": "The actions you can use are: 'click', 'fill', 'hover', 'keyboard_press', 'scroll'; 'tabfocus', 'new_tab', 'tab_close', 'go_back', 'go_forward'; 'send msg to user', 'report_infeasible', 'select_option'. Do not use other undefined actions. Do not include any try-except blocks in the functions." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 120, + 486, + 490, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 486, + 490, + 552 + ], + "spans": [ + { + "bbox": [ + 120, + 486, + 490, + 552 + ], + "type": "text", + "content": "Please include 'Args', 'Returns', and 'Examples' in the function documentation. For (2), write the instruction and rewritten code of each example. Do not include the answer response or example-specific information in the rewritten code. Pay attention to whether all link IDs are available before specifying them in the generated functions. If you use 'send msg_to_user', make sure the message is decided within the function, instead of provided as an argument." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 552, + 490, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 552, + 490, + 586 + ], + "spans": [ + { + "bbox": [ + 120, + 552, + 490, + 586 + ], + "type": "text", + "content": "Make sure each function contains no less than 2 steps, and no more than 5 steps; to keep the functions simple and task-oriented. You can generate zero, one, or multiple functions depending on the provided examples." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 613, + 266, + 627 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 613, + 266, + 627 + ], + "spans": [ + { + "bbox": [ + 105, + 613, + 266, + 627 + ], + "type": "text", + "content": "B Skill Induction: Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 639, + 504, + 662 + ], + "type": "text", + "content": "We provide more details about the skill induction process, in skill curation and reuse frequency (§B.1) and representative skill case studies (§B.2)" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 677, + 277, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 277, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 277, + 690 + ], + "type": "text", + "content": "B.1 Skill Induction and Reusability" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 504, + 733 + ], + "type": "text", + "content": "To provide more insights on how agents curate and reuse programmatic skills, for the main experiments on WebArena, we calculate the number of examples that (i) attempt to induce a new skill, (ii) successfully induce 
a new skill, and (iii) reuse a previously induced skill." + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": "As shown in Table 8, the agent reuses at least 1 skill for " + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "inline_equation", + "content": "42.5\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": " of the cases, which is pretty frequent across websites. Moreover, these skills are created using " + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "inline_equation", + "content": "7.6\\%" + }, + { + "bbox": [ + 104, + 82, + 506, + 118 + ], + "type": "text", + "content": " of the tasks, demonstrating the high scalability of our skill learning approach." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 126, + 121, + 482, + 203 + ], + "blocks": [ + { + "bbox": [ + 126, + 121, + 482, + 203 + ], + "lines": [ + { + "bbox": [ + 126, + 121, + 482, + 203 + ], + "spans": [ + { + "bbox": [ + 126, + 121, + 482, + 203 + ], + "type": "table", + "html": "
DomainAttempted InductionNumber of Examples
Successful InductionReuse ExamplesTotal
shopping21896180
admin3815108156
reddit241114105
map131080109
gitlab251110175
", + "image_path": "a5be42cba2d45ce05778e8e19e0b347f9774925e80aff099ef819a4fa5a5e123.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 123, + 205, + 485, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 205, + 485, + 218 + ], + "spans": [ + { + "bbox": [ + 123, + 205, + 485, + 218 + ], + "type": "text", + "content": "Table 8: Analyzing the number of examples that attempt, induce, and reuse skills." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 237, + 230, + 250 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 237, + 230, + 250 + ], + "spans": [ + { + "bbox": [ + 105, + 237, + 230, + 250 + ], + "type": "text", + "content": "B.2 Representative Skills" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 258, + 504, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 504, + 282 + ], + "type": "text", + "content": "We demonstrate two representative types of skills, which (i) chain frequently-used-together actions, and (ii) involve advanced programming primitives." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 293, + 506, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 293, + 506, + 338 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 506, + 338 + ], + "type": "text", + "content": "Chaining Primitive Actions The most common type of skills chains multiple frequently-used-together primitive actions (e.g., click, fill) in a certain order, to reflect a particular common usage, such as the search_product tool illustrated below. This type of skill provides an easy portal for frequent workflows." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 100, + 342, + 280, + 406 + ], + "blocks": [ + { + "bbox": [ + 100, + 342, + 280, + 406 + ], + "lines": [ + { + "bbox": [ + 100, + 342, + 280, + 406 + ], + "spans": [ + { + "bbox": [ + 100, + 342, + 280, + 406 + ], + "type": "text", + "content": "def search_product(name: str):\n click(\"Search textbox...\")\n if has_pop_window():\n click(\"Close\")\n fill(\"Search textbox\", name)\n click(\"Search button\")" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 506, + 450 + ], + "type": "text", + "content": "Advanced Programming Primitives Besides a sequential chaining of actions, some skills contain more advanced logics, such as if-else or for/while loops, as the two examples below." 
+ } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 95, + 453, + 485, + 657 + ], + "blocks": [ + { + "bbox": [ + 95, + 453, + 485, + 657 + ], + "lines": [ + { + "bbox": [ + 95, + 453, + 485, + 657 + ], + "spans": [ + { + "bbox": [ + 95, + 453, + 485, + 657 + ], + "type": "text", + "content": "def navigate_and_sort_category(category_id: str, subcategory_id: str | None = None):\n '''\"Navigate to a product category and sort items by price.\nArgs:\n category_id: ID of the main category link\n subcategory_id: Optional ID of the subcategory link, if needed\nReturns:\n None\nExamples:\n navigate_and_sort_category('1241', '1873') # PS4 under Video Games\n navigate_and_sort_category('1245') # For main category only\n '''' click(category_id) # Click main category\nif subcategory_id:\n click(subcategory_id) # Click subcategory if provided\nselect_option(\"1553\", \"Price\") # Sort by price ascending" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "code", + "bbox": [ + 95, + 670, + 501, + 733 + ], + "blocks": [ + { + "bbox": [ + 95, + 670, + 501, + 733 + ], + "lines": [ + { + "bbox": [ + 95, + 670, + 501, + 733 + ], + "spans": [ + { + "bbox": [ + 95, + 670, + 501, + 733 + ], + "type": "text", + "content": "def browse_category_byjahicationmenu_id_sequence: list):\n \"\"Browse products by navigating through a sequence of menu IDs.\n This function allows navigation through a series of menu interactions.\n Args:\n menu_id_sequence(list): A list of menu IDs to interact\n sequentially, using hover actions followed by a click." 
+ } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 316, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 316, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 316, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 95, + 83, + 508, + 198 + ], + "blocks": [ + { + "bbox": [ + 95, + 83, + 508, + 198 + ], + "lines": [ + { + "bbox": [ + 95, + 83, + 508, + 198 + ], + "spans": [ + { + "bbox": [ + 95, + 83, + 508, + 198 + ], + "type": "text", + "content": "Example usage: browse_category_byjahgnation['735','786','797']#Navigates Home&Kitchen->Storage&Organization->Racks,Shelves& Drawers \n\"''\" foridx,menu_idin enumerate(menu_id_sequence[-1]): hover/menu_id) click菜单_id_sequence[-1])#Click the final id to land in the predefined category" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 104, + 221, + 321, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 221, + 321, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 221, + 321, + 236 + ], + "type": "text", + "content": "C Scaled-Up and Cross-Website Tests" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 247, + 506, + 282 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 282 + ], + 
"spans": [ + { + "bbox": [ + 104, + 247, + 506, + 282 + ], + "type": "text", + "content": "We provide the full list of tasks used in scaled-up (S4) and cross-website (S5) analyses in S.C.1 and S.C.2, respectively. In S.C.3, we further perform significance testing to validate the findings from Table 4 and Table 6." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 296, + 209, + 309 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 296, + 209, + 309 + ], + "spans": [ + { + "bbox": [ + 105, + 296, + 209, + 309 + ], + "type": "text", + "content": "C.1 Scaled-Up Tasks" + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 106, + 352, + 504, + 602 + ], + "blocks": [ + { + "bbox": [ + 104, + 317, + 504, + 342 + ], + "lines": [ + { + "bbox": [ + 104, + 317, + 504, + 342 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 504, + 342 + ], + "type": "text", + "content": "Table 9, Table 10, Table 11, Table 12 and Table 13 shows example scaled-up tasks studied on the shopping, admin, social forum, software development, and map websites." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 352, + 504, + 602 + ], + "lines": [ + { + "bbox": [ + 106, + 352, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 106, + 352, + 504, + 602 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
Add a wireless headphone, a water bottle, a notebook, a ground coffee, and a mug to my shopping cart.Add a wireless headphone to cart; Add a water bottle to cart; Add a notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Add the most expensive item from the video games category, the cheapest item from the Office Products category, and the most relevant coffee mug to my shopping cart.Add the most expensive item from the video games category to cart; Add the cheapest item from the Office Products category to cart; the most relevant coffee mug to my shopping cart.3
Add the cheapest wireless headphone, a water bottle, the most expensive notebook, a ground coffee, and a mug to my shopping cart.Add the cheapest wireless headphone to cart; Add a water bottle to cart; Add the most expensive notebook to cart; Add a ground coffee to cart; Add a mug to cart.5
Show me the ordered items for each cancelled order from Feb to May in 2023.Show me the 5/17/23 order; Show me the 2/24/23 order; Show me the 2/11/23 order.3
Iterative update my billing address to 231 Willow Way, Suite 100, Chicago, IL, 60601. Then, update my shipping address to 987 Sycamore Circle, Philadelphia, PA, 19102.Successfully update my billing address; Successfully update my shipping address.2
", + "image_path": "744d7a242930e30176d7727b1c46da76ad345db665e2c589a8713ff65ca7e1c6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 149, + 609, + 460, + 622 + ], + "lines": [ + { + "bbox": [ + 149, + 609, + 460, + 622 + ], + "spans": [ + { + "bbox": [ + 149, + 609, + 460, + 622 + ], + "type": "text", + "content": "Table 9: Exemplar scaled-up browsing tasks on the shopping website." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 643, + 227, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 643, + 227, + 654 + ], + "spans": [ + { + "bbox": [ + 105, + 643, + 227, + 654 + ], + "type": "text", + "content": "C.2 Cross-Website Tasks" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 664, + 506, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 664, + 506, + 700 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 506, + 700 + ], + "type": "text", + "content": "Table 14, Table 15, and Table 16 lists example tasks to test agent generalization abilities on shopping (OneStopMarket to Target), social forum (Postmill to Reddit), and software development (GitLab to GitHub) domains." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 713, + 225, + 726 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 713, + 225, + 726 + ], + "spans": [ + { + "bbox": [ + 105, + 713, + 225, + 726 + ], + "type": "text", + "content": "C.3 Significance Testing" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 79, + 504, + 430 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 504, + 430 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 504, + 430 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
Tell me the number of reviews that our store received by far that mention terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.Return the correct number for terms 'disappointed', 'satisfied', 'decent', 'not useful', and 'best'.5
I need to contact a list of customers. Find the customer name and email with phone number 2058812302, 2137418080, 2065555555, 8015551212, and 555-229-3326.Return the correct name and email information for customers with each of the five phone numbers.5
I will need to update our webpage to create a more energetic vibe. Change the page title of '404 Not Found' to 'Bruh bro you clicked the wrong page', the page title of 'Enable Cookies' to 'Cookie monster coming to your place', the page title of 'Home Page' page to 'This is the home page!!', the page with title 'Privacy Policy' to 'No privacy policy is needed is this dystopian world', and lastly, change the page 'About Us' to 'Secret'.Change the page title correctly for each of the five pages.5
I need to generate a bunch of report to show to the store manager in an hour. Could you help me generate a sales order report for the last month, over the last 45 days, and for Q1? I'll also need a refund report for last year, and a tax report for this year. Today is 3/15/2023.Generate a sales report for 2/1/2023-2/29/2023; generate a sales report for 1/29/2023-3/15/2023; generate a sales report for 1/1/2023-3/15/2023; Generate a refund report for 1/1/2022-12/31/2022; Generate a tax report for 1/1/2023-3/15/2023.5
Tell me the SKU of products that have 10 units, 3 units, and 0 units left. Also, give me the product names that have 2-3 units left.Return the correct SKU for the first three questions; return the correct product names for the last question.4
", + "image_path": "802ab4997fb966f03cc6360d294c6427efd24e884407d8c1be5c83e3522e3a9a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 469, + 258, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 258, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 258, + 568 + ], + "type": "text", + "content": "Scaled-Up Tasks We conduct t-tests between (i) ASI and AWM, (ii) ASI and VANILLA agent. From the results in Table 17, we find the advantage of ASI in success rate and efficiency improvements, when comparing to both AWM and VANILLA agents, are statistically significant, as indicated by all" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 568, + 382, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 382, + 580 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 382, + 580 + ], + "type": "text", + "content": "t-statistics with absolute values over 2 and p-value below 0.05." + } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 265, + 470, + 502, + 521 + ], + "blocks": [ + { + "bbox": [ + 132, + 436, + 477, + 449 + ], + "lines": [ + { + "bbox": [ + 132, + 436, + 477, + 449 + ], + "spans": [ + { + "bbox": [ + 132, + 436, + 477, + 449 + ], + "type": "text", + "content": "Table 10: Exemplar scaled-up browsing tasks on the shopping admin website." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 265, + 470, + 502, + 521 + ], + "lines": [ + { + "bbox": [ + 265, + 470, + 502, + 521 + ], + "spans": [ + { + "bbox": [ + 265, + 470, + 502, + 521 + ], + "type": "table", + "html": "
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-2.36010.02262.76640.0068
ASI vs. VANILLA-4.09220.00022.19830.0296
", + "image_path": "a48f320b1a2f55cba3570d504b013d6a4c5591b774509c0aaa4f65a44c16948a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 591, + 258, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 258, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 258, + 691 + ], + "type": "text", + "content": "Cross-Web Tasks We conduct similar significance testing on cross-web tasks and report the results in Table 18. While ASI does not significantly outperform AWM in success rate, given the presumably greater flexibility of textual workflows, ASI still exhibits significant advantages on" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 691, + 506, + 725 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 691, + 506, + 725 + ], + "spans": [ + { + "bbox": [ + 104, + 691, + 506, + 725 + ], + "type": "text", + "content": "the efficiency side. Furthermore, comparing ASI to static VANILLA agents, ASI achieves significant improvements in both success rates and efficiency (i.e., reduced number of steps), suggested by " + }, + { + "bbox": [ + 104, + 691, + 506, + 725 + ], + "type": "inline_equation", + "content": "|t| > 2" + }, + { + "bbox": [ + 104, + 691, + 506, + 725 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 691, + 506, + 725 + ], + "type": "inline_equation", + "content": "p < 0.05" + }, + { + "bbox": [ + 104, + 691, + 506, + 725 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 265, + 599, + 502, + 651 + ], + "blocks": [ + { + "bbox": [ + 263, + 527, + 506, + 552 + ], + "lines": [ + { + "bbox": [ + 263, + 527, + 506, + 552 + ], + "spans": [ + { + "bbox": [ + 263, + 527, + 506, + 552 + ], + "type": "text", + "content": "Table 17: Results of significance testing on ASI's advantages for scaled-up web tasks." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 265, + 599, + 502, + 651 + ], + "lines": [ + { + "bbox": [ + 265, + 599, + 502, + 651 + ], + "spans": [ + { + "bbox": [ + 265, + 599, + 502, + 651 + ], + "type": "table", + "html": "
Method PairSuccess Rate# Steps
t-statp-valuet-statp-value
ASI vs. AWM-1.39800.16732.12380.0378
ASI vs. VANILLA-3.59840.00072.57920.0125
", + "image_path": "8109e663c1fd04718516750d3b79702b899db5b9302f97f72deea8dfb48e50d0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 263, + 656, + 506, + 680 + ], + "lines": [ + { + "bbox": [ + 263, + 656, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 263, + 656, + 506, + 680 + ], + "type": "text", + "content": "Table 18: Results of significance testing on ASI's advantages for cross-web tasks." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 95, + 504, + 366 + ], + "blocks": [ + { + "bbox": [ + 106, + 95, + 504, + 366 + ], + "lines": [ + { + "bbox": [ + 106, + 95, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 106, + 95, + 504, + 366 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
I'm planning to organize multiple meetings in the next few months. Help me post notices on virtual meetings for the little women on Apr 10th, for Harry Potter in May 15th, and for Jane Eyre in Jan 30th, in the most suitable forums in PostMill.Post Apr 10th meetup; Post about May 15th meetup; Post Jan 30th meetup. All in book-related forums.3
Could you tell me all forums with names related to computer science?must include: deeplearning (1 pt), Machine-Learning (1 pt); optionally (get 1 score if include any): science, askscience, technology.3
Find the most relevant posts about jerseycity, newjersey, and nyc; and tell me how different they are.Correctly find post about jerseycity; Correctly find post about newjersey; Correctly find post about nyc; Answer how different they are.4
Thumbs down the top-2 posts in jerseycity, newjersey, and nyc forums, I don't like them.Thumbs down the top-2 posts in the jerseycity forum; Thumbs down the top-2 posts in the newjersey forum; Thumbs down the top-2 posts in the nyc forum.3
Reply "Thank you! This is super helpful!" to three posts about long-distance relationship advice.Reply to three posts with the correct message. Need to be relevant to long-distance relationship advice.3
", + "image_path": "f55cd46683e3679b683a331e175b0d050927180d5bf9ae39211680600432ba2f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 423, + 504, + 694 + ], + "blocks": [ + { + "bbox": [ + 173, + 373, + 437, + 385 + ], + "lines": [ + { + "bbox": [ + 173, + 373, + 437, + 385 + ], + "spans": [ + { + "bbox": [ + 173, + 373, + 437, + 385 + ], + "type": "text", + "content": "Table 11: Exemplar scaled-up tasks on the Postmill website." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 423, + 504, + 694 + ], + "lines": [ + { + "bbox": [ + 106, + 423, + 504, + 694 + ], + "spans": [ + { + "bbox": [ + 106, + 423, + 504, + 694 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
Display the list of issues in the a11yproject/a11yproject.com repository that have labels related to 'help needed', and assign the most recent one to the top contributor of this repository.Display the help-wanted issues; find the top contributor; assign him to the most recent help-needed issue.3
Set up a new, empty repository with the name agent_skill_induction, and create a MIT license file. Then, invite Abishek and Vinta as collaborators.Create a new repository with given name; Create a MIT license inside; Invite both collaborators.3
Start a private project web_agent.android xl with Android template and add primer, convexegg, abishek as members.Create the repository private and with Android template; Invite all three people as members.2
Add the following users to repo a11y-webring.club as developer: [abisubramanya27, lahwaacz], and [yjlou, a11yproject] as maintainer.Add abisubramanya27 and lahwaacz as developers; Add yjlou and a11yproject as maintainers.2
Add the following users [abisubramanya27, lahwaacz, yjlou, a11yproject] to repo a11y-webring.club, make sure to assign them different roles.Add abisubramanya27 with role 1; Add lahwaacz with role 2; Add yjlou with role 3; Add a11yproject as role 4. Role 1-4 need to be all different.4
", + "image_path": "32cefbcbef9f4c017763f3d35ba213c0ab16333e67dde8da90f90a5625d1eeb9.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 175, + 700, + 435, + 714 + ], + "lines": [ + { + "bbox": [ + 175, + 700, + 435, + 714 + ], + "spans": [ + { + "bbox": [ + 175, + 700, + 435, + 714 + ], + "type": "text", + "content": "Table 12: Exemplar scaled-up tasks on the GitLab website." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 129, + 504, + 408 + ], + "blocks": [ + { + "bbox": [ + 106, + 129, + 504, + 408 + ], + "lines": [ + { + "bbox": [ + 106, + 129, + 504, + 408 + ], + "spans": [ + { + "bbox": [ + 106, + 129, + 504, + 408 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
Search for the closest restaurants, cafes, parking, and banks to Carnegie Mellon University on the map.Return the closest restaurants; Return the closest cafes; Return the closest parking; Return the closest banks.4
I will need to go to multiple places from Carnegie Mellon University today, including the Univ of Pittsburgh, UPMC shady-side, the Schenley park, and Squirrel Hill. Could you should me the driving route to all those places?Show me driving route from CMU to UPitt; Show me driving route from CMU to UPMC; Show me driving route from CMU to Schenley Park; Show me driving route from CMU to Squirrel Hill.4
Show me the route of driving from CMU to University of Pittsburgh, then walking to the Schenley Park; next, bike to UPMC shadyside, and walk to Squirrel Hill after that.Show me CMU → Upitt route by car; Show me Upitt → Schenley Park route by foot; Show me Schenley Park → UPMC route by bike; Show me UPMC → Squirrel Hill route by foot.4
Check if the Univ of Pittsburgh, UPMC shadyside, schenley park, and squirrel hill can be reached within one hour by walking, if departing from Carnegie Mellon University.Return yes to route 1, route 2, route 3, and route 4.4
Tell me the coordinates of Univ of Pittsburgh, UPMC shadyside, schenley park, squirrel hill, and CMU in DD format.Return the coordinates of each of the four places.4
", + "image_path": "1ff0af4eb9d2ef32997f69eebd000de698114a0aa8fdfd862945d1007cd5f3ec.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 532, + 504, + 662 + ], + "blocks": [ + { + "bbox": [ + 180, + 415, + 428, + 428 + ], + "lines": [ + { + "bbox": [ + 180, + 415, + 428, + 428 + ], + "spans": [ + { + "bbox": [ + 180, + 415, + 428, + 428 + ], + "type": "text", + "content": "Table 13: Exemplar scaled-up tasks on the Map website." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 532, + 504, + 662 + ], + "lines": [ + { + "bbox": [ + 106, + 532, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 106, + 532, + 504, + 662 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
Show me the options for Canon photo printer?Return the correct search result.1
I have a lot of Nintendo Switch game cards now, help me find the best storage option to fit all 11 cards.Return one valid product.1
What is the price range for beauty products?Return the correct price range.1
Show me products under $25 for woman shoesDisplay correct products.1
Show the least expensive shoe storage with a minimum storage capacity of 12 pairs.Display correct products.1
", + "image_path": "52294272a8d6f283c35d590bbc871337b4ac1a2662ec382ff8567094c2e77506.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 178, + 669, + 430, + 681 + ], + "lines": [ + { + "bbox": [ + 178, + 669, + 430, + 681 + ], + "spans": [ + { + "bbox": [ + 178, + 669, + 430, + 681 + ], + "type": "text", + "content": "Table 14: Exemplar shopping tasks on the target website." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 125, + 504, + 327 + ], + "blocks": [ + { + "bbox": [ + 106, + 125, + 504, + 327 + ], + "lines": [ + { + "bbox": [ + 106, + 125, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 106, + 125, + 504, + 327 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
Tell me the names of books recommended in the latest five posts in the books forumFind the r/books forum; Find the most recent 5 posts; Give the correct answer.3
Tell me the titles of the 5 most recent posts about little women in the books forumFind the r/books forum; Find little women related posts; Sort the posts by newest.3
What are the recommended products for noise-canceling headphones within a bud-get of $200 in r/headphonesFind the r/headphones forum; Correctly search with noise-canceling, under $200 re-requirements; Return a valid headphone rec-ommendation.3
Find 3 pieces of advices about deal with long-distance relationships in a subreddit for relations.Navigate to a forum about relations; find at least 3 pieces of advice from relevant posts.2
Find if there are any jeep wrangler meetups. If so, when and where?Search in jeep wrangler related forums; Re-turn a valid answer based on the search result.2
", + "image_path": "d71556dcaf97c5216a9666f1fe442438dcc1304ce600f3c7a086e9010778a15b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 106, + 444, + 504, + 665 + ], + "blocks": [ + { + "bbox": [ + 171, + 333, + 440, + 345 + ], + "lines": [ + { + "bbox": [ + 171, + 333, + 440, + 345 + ], + "spans": [ + { + "bbox": [ + 171, + 333, + 440, + 345 + ], + "type": "text", + "content": "Table 15: Exemplar social forum tasks on the reddit website." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 444, + 504, + 665 + ], + "lines": [ + { + "bbox": [ + 106, + 444, + 504, + 665 + ], + "spans": [ + { + "bbox": [ + 106, + 444, + 504, + 665 + ], + "type": "table", + "html": "
InstructionCheckpointsScore
Tell me the full address of all international airports that are within a driving distance of 30 miles to Carnegie Mellon UniversityReturn Pittsburgh International Airport.1
I will arrive Pittsburgh Airport soon. Provide the name of a Hilton hotel in the vicinity, if available. Then, tell me the the walking distance to the nearest supermarket own by a local company from the hotel.Show me the hotels; Find a nearby supermarket; Show me the walking route from the hotel to the supermarket.3
Show me the walking route from nearby hotels to CMU, Pittsburgh that take at most 5 minutes?Find a hotel that meets the walking time requirement; Show me the walking route.2
I am at CMU Pittsburgh, how long it takes to the nearest USPS postal office with different transportation methods?Return travel time by car, by foot, by bus, and by bike.4
Tell me the coordinates of Carnegie Mellon Cafe in DD format.Return the correct coordinates.1
", + "image_path": "f7a8b6011454d378ab9e8ea447740255b4e8ead0c6b591b4e2c4a6fda28334ed.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 155, + 671, + 455, + 684 + ], + "lines": [ + { + "bbox": [ + 155, + 671, + 455, + 684 + ], + "spans": [ + { + "bbox": [ + 155, + 671, + 455, + 684 + ], + "type": "text", + "content": "Table 16: Exemplar social forum tasks on the Google Maps website." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_content_list.json b/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4f300a6cfa0ecfd988910b89dc5abe609e628803 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_content_list.json @@ -0,0 +1,1721 @@ +[ + { + "type": "text", + "text": "VideoChat-R1: Enhancing Spatio-Temporal Perception via Reinforcement Fine-Tuning", + "text_level": 1, + "bbox": [ + 232, + 122, + 767, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xinhao Li $^{2,1*}$ , Ziang Yan $^{3,1*}$ , Desen Meng 
$^{2*}$ , Lu Dong $^{4,1}$ , Xiangyu Zeng $^{2,1}$ , Yinan He $^{1}$ , Yali Wang $^{6,1}$ , Yu Qiao $^{1,6}$ , Yi Wang $^{\\dagger 1,5}$ , Limin Wang $^{\\dagger 2,1}$", + "bbox": [ + 196, + 223, + 799, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Shanghai AI Laboratory $^{2}$ Nanjing University $^{3}$ Zhejiang University $^{4}$ University of Science and Technology of China $^{5}$ Shanghai Innovation Institute", + "bbox": [ + 256, + 255, + 740, + 282 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences https://github.com/OpenGVLab/VideoChat-R1", + "bbox": [ + 267, + 282, + 728, + 309 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 345, + 537, + 361 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reinforcement Learning (RL) benefits Large Language Models (LLMs) for complex reasoning. Inspired by this, we explore integrating spatio-temporal specific rewards into Multimodal Large Language Models (MLLMs) to address the unique challenges of video understanding, such as long-range temporal associations. This paper investigates how rule-based rewards, particularly temporal ones, can improve video reasoning and their generalizability. Our study proposes Reinforcement Fine-Tuning (RFT) as a data-efficient method to enhance video reasoning on specific tasks without sacrificing original capabilities. Through joint RFT on multiple spatio-temporal perception tasks, we developed VideoChat-R1, a powerful Video MLLM. VideoChat-R1 achieves state-of-the-art spatio-temporal perception, demonstrating significant improvements in tasks like temporal grounding (+31.8) and object tracking (+31.2), while also improving general QA benchmarks. The enhanced perception and preserved chat abilities contribute to a more reliable video dialogue system, leading to our \"Temporal Clue-driven Reasoning\" inference schema. 
This work provides a foundation for developing robust, real-world video comprehension agents.", + "bbox": [ + 228, + 376, + 767, + 598 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 621, + 313, + 636 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The integration of reinforcement learning (RL) has notably propelled the capabilities of large language models (LLMs) forward, particularly in complex reasoning. Studies, such as o-series from OpenAI [12] and R1 of DeepSeek [9], demonstrated that test-time scaling improves model reasoning and such reasoning is elicit-able via only rule-based rewarding. Inspired by these advances, community is interested in tuning multimodal large language models (MLLMs) with vision-related verifications in Group Relative Policy Optimization (GRPO) [9] manner [43, 18, 38, 3, 22, 19, 35, 40, 39, 4]. They primarily consider visual mathematical reasoning and spatial localization.", + "bbox": [ + 169, + 651, + 826, + 751 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Regarding video understanding, reasoning is also crucial as some of its core abilities (e.g. its long-range temporal association, fine-grained spatiotemporal understanding based on user queries) are barely addressed via perception or simple analysis. Initial works [42, 27, 5] have validated the superiority of the GRPO algorithm over supervised fine-tuning in some specific video tasks, such as temporal grounding and video question answer. Considering advancing the agent development with real-world video comprehension, we need to investigate and integrate spatio-temporal specific rewards or verification mechanisms directly into the learning of current MLLMs in a scalable manner. This could foster a more tightly coupled, near-closed-loop learning environment, enabling MLLMs to master intricate spatio-temporal skills. 
While our current work may not fully achieve this ambitious", + "bbox": [ + 169, + 755, + 826, + 881 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.06958v5 [cs.CV] 11 Nov 2025", + "bbox": [ + 22, + 273, + 57, + 722 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "* Equal contribution. † Corresponding authors.", + "bbox": [ + 197, + 887, + 478, + 902 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 922, + 315, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6882c68a3a26b0712c0adff2c4095059248463c416caf7d765c85c49ea62ab41.jpg", + "image_caption": [ + "Figure 1: Overview of VideoChat-R1. Through reinforcement learning fine-tuning using GRPO, VideoChat-R1 has powerful spatio-temporal perception capabilities and can apply these capabilities in chatting scenarios." + ], + "image_footnote": [], + "bbox": [ + 181, + 93, + 816, + 290 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "goal, it endeavors to lay foundational groundwork for the community by exploring how to embed such mechanisms effectively. Specifically, we in this paper chart the landscape of how rule-based rewards (especially temporal related ones) work in video understanding, along with systematic evaluations of the its generalizability across diverse video-based reasoning scenarios.", + "bbox": [ + 169, + 386, + 823, + 444 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our study begins with spatio-temporal perception, extending to other popular video tasks (e.g. video caption), forming a practical recipe to enhance video reasoning via proper Reinforcement Fine-Tuning (RFT). We evaluate the robustness of various video verifications through different training and testing setting, also benchmarking their respective data requirements. Moreover, how these rewards interact with each other are also systematically explored. 
Based on these experiences, we give an intuitive and effective inference schema named \"Temporal Clue-driven Reasoning\". Generally, our main findings are as follows.", + "bbox": [ + 169, + 448, + 823, + 545 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Reinforcement fine-tuning is data-efficient for enhancing models on specific tasks without sacrificing original capabilities. With a small amount of data, training via RFT can yield a remarkable improvement in spatio-temporal perception ability, and there is negligible impact on the performance of out-domain tasks and the original general capabilities of the model, which outperforms traditional supervised fine-tuning significantly.", + "- Through joint reinforcement fine-tuning on multiple spatio-temporal perception tasks, we construct VideoChat-R1, a powerful Video MLLM that boasts state-of-the-art spatiotemporal perception capabilities while also taking into account chat abilities. We have also discovered that training on spatio-temporal perception tasks has slightly strengthened the model's spatio-temporal reasoning abilities. Compared with Qwen2.5-VL-7B, VideoChat-R1 achieves several times the performance improvement in spatiotemporal perception tasks such as temporal grounding (+31.8) and object track (+31.2). At the same time, it also achieves non-trivial improvements on general QA benchmarks, such as VideoMME (+0.9), MVBench (+1.0), and Perception Test (+0.9)", + "- The improvement of spatio-temporal perception ability and the preservation of the original chat capability can contribute to a more reliable and efficient video dialogue system. Our VideoChat-R1 can provide reference video segments when answering users' questions. Meanwhile, we propose to utilize these video segments for \"Temporal Clue-driven Reasoning\" to further obtain more accurate answers. 
Our experimental results reveal the potential of the approach that enhances the model's spatiotemporal perception ability through reinforcement learning for future research in the directions of reliable video dialogue systems and long video understanding." + ], + "bbox": [ + 215, + 569, + 826, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related work", + "text_level": 1, + "bbox": [ + 171, + 89, + 318, + 104 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reasoning in MLLMs. The field LLMs has witnessed significant breakthroughs in reasoning, exemplified by recent works [12, 9]. These developments [23, 9, 24] have markedly improved LLMs' proficiency in tackling complex, multi-step tasks, such as challenging mathematical problems and coding exercises. Extending these RL techniques to multimodal large language models (MLLMs) has seen numerous efforts [43, 18, 38, 3, 22, 19, 35, 40, 39, 4] focused on leveraging verifiable reward mechanisms to boost visual reasoning performance. However, the application of RL-based strategies to spatio-temporal reasoning within the video domain remains comparatively less explored, with a few studies [27, 42, 5] investigating this area. [27] and [42] show GRPO's potential in temporal grounding and sentiment analysis, while, [5] demonstrates how GRPO can facilitate temporal reasoning.", + "bbox": [ + 169, + 121, + 826, + 261 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Spatio-Temporal Understanding with MLLMs. Video understanding heavily relies on spatiotemporal perception. Progress in Video Multimodal Large Language Models (video MLLMs) [14, 20, 15, 28, 41, 29, 17, 11, 1] addresses video question answering and captioning in the unfed dialogue format, leveraging LLMs to organize predictions over visual perceptions. 
[36] and [37] leverage data augmentation for temporal modeling, yet compromising their general performance. [33] improves fine-grained spatio-temporal perception by task preference optimization with solid supervised fine-tuning using a lot of data.", + "bbox": [ + 169, + 277, + 826, + 377 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Methodology", + "text_level": 1, + "bbox": [ + 171, + 396, + 316, + 414 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We present how to exploit the spatio-temporal rewards to improve MLLMs in video domains using GRPO. Before that, we revisit the Group Relative Policy Optimization (GRPO) [23] first.", + "bbox": [ + 169, + 429, + 823, + 459 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Preliminary", + "text_level": 1, + "bbox": [ + 171, + 476, + 297, + 492 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Group Relative Policy Optimization (GRPO) [23] compares groups of candidates responses directly, independent on critic models. Regarding this, it significantly lowers training resources. Formally, with an input query $q$ , GRPO initiates by generating a set of $G$ distinct candidate responses, denoted as $o = \\{o_1, \\dots, o_G\\}$ , via policy sampling. Each of these candidates is then assigned a score from a predefined reward function, yielding $R_1, \\dots, R_G$ . To normalize these scores and ascertain the relative quality of each response, GRPO computes their mean and standard deviation:", + "bbox": [ + 169, + 503, + 825, + 587 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nA _ {i} = \\frac {R _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}. 
\\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 595, + 825, + 631 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Here $A_{i}$ quantifies the relative quality of the $i$ -th candidate response within its group. The objective of GRPO is to encourage the model to prioritize and generate responses that achieve higher relative scores within such groups. Furthermore, the overall training objective incorporates a KL-divergence term $\\mathrm{D}_{\\mathrm{KL}}(\\cdot |\\cdot)$ , which serves to constrain the optimized policy $\\pi_{\\theta}$ from diverging excessively from the original MLLM parameters $\\pi_{\\mathrm{ref}}$ .", + "bbox": [ + 169, + 638, + 823, + 709 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Spatio-Temporal Rewards of Video MLLM in GRPO", + "text_level": 1, + "bbox": [ + 169, + 728, + 584, + 743 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We explore how to use GRPO to enhance the performance of Video MLLM in video-language understanding. We consider the five most common types of video related tasks: temporal grounding, object tracking, video question answering, captioning, and quality assessment in our experiments.", + "bbox": [ + 169, + 753, + 826, + 797 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Format reward. To enable the model to output responses in the format we desire. For example, we expect the model to enclose its thought process with ... and the answer with ..., we designed a format reward $R_{\\mathrm{format}}$ for each task. We use regular expression matching to determine whether the model adheres to our specified format:", + "bbox": [ + 169, + 813, + 823, + 869 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nR _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f o u t p u t m a t c h e s f o r m a t}, \\\\ 0, & \\text {i f o u t p u t d o e s n ’ t m a t c h f o r m a t}. \\end{array} \\right. 
\\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 334, + 881, + 825, + 914 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "IoU reward in spatio-temporal perception. For the spatio-temporal perception such as temporal grounding and object tracking, it requires the Video MLLM to output the time interval in the video that is associated with the content of a given textual query. Evidently, we can use the Intersection over Union (IoU) between the predicted interval by the model and the ground-truth interval as the reward function. This reward function effectively characterizes the accuracy of the interval predicted by the model.", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nR _ {\\mathrm {I o U}} = \\frac {\\left| \\mathcal {I} _ {\\text {p r e d}} \\cap \\mathcal {I} _ {\\mathrm {g t}} \\right|}{\\left| \\mathcal {I} _ {\\text {p r e d}} \\cup \\mathcal {I} _ {\\mathrm {g t}} \\right|}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 421, + 181, + 825, + 215 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mathcal{I}_{\\mathrm{pred}}$ and $\\mathcal{I}_{\\mathrm{gt}}$ are the predicted and the ground truth of time intervals or detection boxes, respectively.", + "bbox": [ + 169, + 226, + 826, + 255 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Accuracy reward in classification. Discriminative tasks, such as multiple-choice video question answering and classification, aim to determine whether the model's prediction is consistent with the answer to the question. 
Therefore, we define:", + "bbox": [ + 169, + 276, + 823, + 319 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nR _ {\\text {a c c u a c y}} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} A _ {\\text {p r e d}} \\neq A _ {\\text {g t}} \\\\ 1, & \\text {i f} A _ {\\text {p r e d}} = A _ {\\text {g t}}, \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 333, + 825, + 367 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $A_{\\mathrm{pred}}$ and $A_{\\mathrm{gt}}$ denote the predicted and the ground truth answers, respectively.", + "bbox": [ + 169, + 378, + 733, + 395 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Recall reward in video captioning. For tasks like video captioning with open-ended outputs, it is impossible to simply compare and determine the gap between the generated caption and the ground truth caption. Therefore, we use a LLM as a \"judge\" to provide a reward score. In order to reduce the uncertainty of the evaluation criteria for the LLM, we first make the LLM decompose the ground truth and predicted captions into events list. Specifically, we utilize Qwen2.5-72B [34] to extract the events in the description and judge whether the events in a ground truth description can be entailed by the description predicted by the model. 
We calculate the event recall score as the ratio of events in a ground truth description that are entailed by the predicted description, and set different rewards according to the event recall score:", + "bbox": [ + 169, + 415, + 823, + 540 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nR _ {\\text {r e c a l l}} = \\operatorname {R e c a l l} _ {\\text {e v e n t}} \\left(C _ {\\text {p r e d}}, C _ {\\text {g t}}\\right), \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 382, + 554, + 825, + 570 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $C_{\\mathrm{pred}}$ and $C_{\\mathrm{gt}}$ represent the predicted and the ground truth captions, respectively.", + "bbox": [ + 169, + 582, + 750, + 598 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By combining the above reward functions, we explored how to utilize GRPO to enhance the performance of Video MLLM in various tasks. The specific details can be found in the Section 4.", + "bbox": [ + 169, + 603, + 826, + 631 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Enhance Spatio-Temporal Perception of Video MLLM through GRPO", + "text_level": 1, + "bbox": [ + 169, + 654, + 702, + 669 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The combination of reward functions. We adopt different combinations of reward functions for training in different tasks. Specifically, for the temporal grounding and object tracking task, $R_{\\mathrm{st}} = R_{\\mathrm{format}} + R_{\\mathrm{IoU}}$ . For the multi-choice QA and video quality assessment, $R_{\\mathrm{qa}} = R_{\\mathrm{format}} + R_{\\mathrm{accuracy}}$ . For the multi-choice QA with glue (e.g. Grounding QA), $R_{\\mathrm{gqa}} = R_{\\mathrm{format}} + R_{\\mathrm{IoU}} + R_{\\mathrm{Acc}}$ . 
For the video caption, $R_{\\mathrm{cap}} = R_{\\mathrm{format}} + R_{\\mathrm{Caption}}$ .", + "bbox": [ + 169, + 681, + 826, + 753 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VideoChat-R1-18k. We collect diverse video corpus from existing public video datasets for reinforcement learning training to enhance the model's spatiotemporal perception ability. For the temporal grounding task, we use the training set of Charade - STA [7] (5,338 samples) for training. For the object tracking task, training is conducted on the GoT - 10k [10] dataset, which has 9,335 samples. For the QA and grounding QA tasks, the validation set of NExTGQA [31] (3,358 samples) is used for training. For video captioning, FIBER-1k [32] (1,000 samples) is adopted for training. For video quality assessment, we use the quality assessment task from VidTAB [16] under the 100-shot setting, with 200 samples for training. Finally, for the training of VideoChat-R1, we perform joint training on three spatio-temporal perception-related tasks: temporal grounding, object tracking, and grounding QA. 
In total, 18,031 samples are used for training.", + "bbox": [ + 169, + 772, + 826, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Algorithm 1: VideoChat $\\mathcal{M}$ with \"Clue-Perception\"" + ], + "code_body": "Input: $V_{\\mathrm{low}}$ : Low-resolution/low-fps vi", + "guess_lang": "txt", + "bbox": [ + 173, + 112, + 436, + 126 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$Q$ :User question", + "guess_lang": "txt", + "bbox": [ + 173, + 127, + 287, + 140 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$\\Delta_{\\mathrm{res}}$ : Resolution boost factor", + "guess_lang": "txt", + "bbox": [ + 173, + 140, + 364, + 152 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$\\Delta_{\\mathrm{fps}}$ : Frame rate boost factor", + "guess_lang": "latex", + "bbox": [ + 173, + 154, + 364, + 167 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Output: $A^{(2)}$ : Final refined answer", + "guess_lang": "txt", + "bbox": [ + 173, + 169, + 410, + 184 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Initial Answer Generation:", + "guess_lang": "txt", + "bbox": [ + 173, + 189, + 364, + 202 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$(A^{(1)},\\mathcal{C})\\gets \\mathcal{M}(V_{\\mathrm{low}},Q)$", + "guess_lang": "latex", + "bbox": [ + 173, + 202, + 336, + 218 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Clue Processing & Upsampling:", + "guess_lang": "txt", + "bbox": [ + 173, + 222, + 398, + 237 + ], + 
"page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$\\mathcal{T}_c \\gets$ ExtractTemporalClues $(\\mathcal{C})$", + "guess_lang": "txt", + "bbox": [ + 197, + 237, + 410, + 250 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$V_{\\mathrm{seg}} \\gets \\mathrm{SelectSegments}(V_{\\mathrm{low}}, \\mathcal{T}_c)$", + "guess_lang": "txt", + "bbox": [ + 197, + 251, + 416, + 263 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$V_{\\mathrm{high}} \\gets$ Upsample $(V_{\\mathrm{seg}}, \\Delta_{\\mathrm{res}}, \\Delta_{\\mathrm{fps}})$", + "guess_lang": "latex", + "bbox": [ + 173, + 263, + 405, + 279 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Final Answer Generation:", + "guess_lang": "txt", + "bbox": [ + 173, + 282, + 357, + 296 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "$A^{(2)}\\gets \\mathcal{M}(V_{\\mathrm{high}},Q)$", + "guess_lang": "latex", + "bbox": [ + 173, + 296, + 313, + 311 + ], + "page_idx": 4 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "return $A^{(2)}$", + "guess_lang": "lua", + "bbox": [ + 173, + 316, + 253, + 332 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Chat with Enhanced Spatio-Temporal Perception. After enhancing the spatiotemporal perception capability of MLLMs, we can construct a more reliable video chat system. Specifically, after the model answers a user's question, it can provide relatively accurate clues that support the answer to that question. We can further leverage these clues to improve the accuracy of the responses. 
Herein, we propose a simple \"Temporal Clue-driven Reasoning\" schema: after the model gives the first answer, we re-input the video segments corresponding to the obtained clues into the model at a higher resolution and frame rate, prompting it to answer again. By perceiving more details, the model can generate more accurate responses. Additionally, this operation is also well-suited for long video understanding under conditions of limited computing resources.", + "bbox": [ + 169, + 354, + 826, + 481 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 503, + 313, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation details. The main experiments are all conducted based on Qwen2.5-VL-7B [1] (except for the video captioning, for which Qwen2-VL-7B [26] is used).", + "bbox": [ + 169, + 537, + 823, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "**Benchmarks.** We employ MVBench [15], Perception Test [21], VideoMME [6] for evaluation of general video understanding. Given that the majority of videos in our training set are short-length, we only use the short subset of VideoMME in testing. For the temporal grounding task, we use the test set of Charade-STA [7] for in-domain testing and the test set of ActivityNet-Grounding [13] as out-domain test data. For the object tracking task, testing is done using the GoT-10k [10] dataset. For the QA and grounding QA tasks, the test set of NExTGQA [31] is used for testing. 
And we use Dream-1k [25] and VidTAB-QA [16] for the video captioning and video quality assessment.
MethodCharades-STAActivityNetNExTGQAGoTVideoMMEMVBenchPeception Test
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-AvgAvgVal
Baseline
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.366.969.1
SFT on specific tasks
+SFT w/ Charades-STA46.345.025.320.616.77.9----N/A*N/A*N/A*
+SFT w/ GoT--------41.829.559.258.658.5
+SFT w/ NExTGQA------28.264.8--60.159.260.7
GRPO on various tasks
VideoChat-R160.871.750.236.633.417.732.470.643.838.272.267.970.0
VideoChat-R1-thinking59.970.647.235.533.316.736.169.243.333.974.266.269.6
", + "bbox": [ + 173, + 734, + 823, + 857 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: Results of VideoChat-R1 on various Video Benchmarks. * indicates that the model has suffered from overfitting and is unable to answer the question properly. Since the number of input pixels is fixed during our evaluation, the baseline results are slightly lower than those reported in their origin paper [1].", + "bbox": [ + 169, + 858, + 823, + 915 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As shown in Table 1, after training with GRPO on spatio-temporal perception datasets, both VideoChat-R1 and VideoChat-R1-thinking significantly outperform the performance of Qwen2.5-VL and that of models fine-tuned through SFT for a single specific task across various spatiotemporal perception benchmarks and the general understanding benchmark VideoMME. This validates the effectiveness of our approach, which leverages multiple spatiotemporal perception datasets and RFT for enhancing spatiotemporal perception.", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Meanwhile, we observe that for spatio-temporal perception tasks, engaging in thinking processes does not necessarily lead to performance gains. However, for tasks such as QA and VideoMME, which may require complex reasoning, conducting inferences during testing can result in notable performance improvements.", + "bbox": [ + 169, + 181, + 826, + 238 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Ablation Studies and Discussions", + "text_level": 1, + "bbox": [ + 171, + 256, + 444, + 270 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Muti-task Co-training. As shown in Table 2, we found that mixed training of different spatiotemporal perception tasks using GRPO can yield a synergistic improvement effect. 
Training with the multiple tasks achieves nearly the best results across all benchmarks. This reveals the potential of GRPO for larger-scale and multi-task collaborative training in the future.", + "bbox": [ + 169, + 282, + 826, + 339 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9157e90250119c00b063011b92192f98c3ffde61f85f248f6cd013fdfd7e36b4.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodCharades-STAANetNExTGQAGoTVideoMME
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-Avg
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.3
+GRPO w/ STA59.370.446.030.727.512.931.461.227.812.972.6
+GRPO w/GQA36.033.515.524.920.610.735.168.736.126.772.0
+GRPO w/ GoT28.725.19.620.116.26.815.660.542.530.671.4
+GRPO w/ STA-GQA59.869.747.033.731.016.035.767.736.528.972.2
+GRPO w/ STA-GQA-GoT60.871.750.236.633.417.732.470.643.838.272.2
", + "bbox": [ + 179, + 354, + 823, + 457 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/23efda807d3631875492b128e1c6d43a6d1b3acfb866a8d220b7971a875b30c3.jpg", + "table_caption": [ + "Table 2: Ablation results of Cotraining on Spatio-Temporal Tasks." + ], + "table_footnote": [], + "table_body": "
MethodEpochsTraining PromptTest PromptCharades-STA (in domain)ActivityNet (out domain)VideoMME
ThinkAnswerThinkAnswermIoUR@0.3R@0.5R@0.7mIoUR@0.3R@0.5R@0.7Short-Avg
Vision Experts
FlashVTG [2]-------70.349.9-----
InternVideo2-6B [28]-------70.049.0-----
SG-DETR [8]--71.152.8-----
MLLMs
Qwen2.5-VL-7B (baseline)---29.044.724.211.121.128.315.87.471.3
---28.141.823.411.117.722.713.47.771.3
+SFT146.363.945.025.320.630.216.77.9N/A*(-71.3)
334.6(+6.5)51.736.320.617.3(-3.8)26.110.03.9N/A*(-71.3)
+GRPO158.780.967.745.431.946.328.814.172.6
159.3(+31.2)81.770.446.030.7(+13.0)45.027.512.973.6(+2.3)
361.3(+33.2)83.172.851.534.3(+16.6)50.432.216.270.9(-0.4)
", + "bbox": [ + 176, + 500, + 823, + 645 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/cc2f8bc7d7e5a4e75e02523ac34df5e291289f198558c63a1c2e62ec64dc125a.jpg", + "table_caption": [ + "Table 3: Ablation results of Temporal Grounding Task. * indicates that the model has suffered from overfitting and is unable to answer the question properly." + ], + "table_footnote": [], + "table_body": "
MethodGoTVideoMME
Average overlapR@0.5R@0.7Short-Avg
Qwen2.5-VL-7B12.61.1071.3
+SFT41.829.53.959.2
+GRPO42.5(+29.9)30.6(+29.5)3.9(+3.9)71.4(+0.1)
", + "bbox": [ + 302, + 695, + 696, + 767 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4: Ablation results of Object Tracking. We use 8 frames as input for training and evaluation.", + "bbox": [ + 169, + 768, + 823, + 784 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Temporal Grounding and Object tracking. As shown in Table 3 and Table 4, fine-tuning Qwen2.5-VL using GRPO significantly improves the performance of temporal grounding and object tracking tasks. Additionally, it slightly enhances the performance on the general understanding benchmark VideoMME. Even when training for more epochs, GRPO is less prone to overfitting compared to SFT. Instead, it can continuously improve the performance of temporal grounding, eventually surpassing the performance of previous expert models. Moreover, stimulating the model's thinking ability provides some benefits for both temporal grounding and VideoMME tasks.", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/66de61c965e6f7125a4d0851a6daa88d02b59216d290423ca25063a4fad099db.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodTraining PromptTest PromptNExTGQAVideoMME
ThinkAnswerGlueThinkAnswerGluemIoUaccShort-Avg
Direct Output
Qwen2.5-VL-7B (baseline)-41.771.3
15.459.5-
+SFT-65.160.2
28.2(+12.8)64.8(+5.3)60.1(-11.2)
+GRPO-70.171.7
16.270.271.7
35.1(+19.7)68.7(+9.2)72.0(+0.7)
Chain-of-thought Output
Qwen2.5-VL-7B-47.773.0
20.253.372.2
+GRPO-68.874.7
32.9(+12.7)66.9(+13.6)75.3(+3.1)
", + "bbox": [ + 243, + 88, + 759, + 277 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Video Question Answer. As shown in Table 5, for the video question answering task, we selected the multi-choice QA task, which is easy to evaluate, for our experiments. Additionally, we explored the grounding QA task. In this task, when answering questions, the model is required to simultaneously provide the temporal cues on which its answers are based. Using merely a little over three thousand training data samples, we found that GRPO demonstrated remarkable fine-tuning capabilities. Not only did it lead to a substantial improvement in the performance of the NExTGQA task, but it also brought about a noticeable enhancement in the VideoMME task. We noticed that, unlike the previous strongly spatiotemporal perception tasks such as temporal grounding, thinking played a significant role in the QA task. Meanwhile, the glue signals also provided some assistance for relatively complex video understanding tasks like VideoMME.", + "bbox": [ + 169, + 313, + 826, + 452 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/b817657d6e54ca97c5f4318ea64a574570534ae3d5013321a235e1b68d612e10.jpg", + "table_caption": [ + "Table 5: Ablation results of Multi-Choice Video QA." + ], + "table_footnote": [], + "table_body": "
MethodDream-1kVidTAB-QA
F1PrecisionRecallAccuracy
Baseline30.633.827.970.7
+SFT31.432.630.271.7
+GRPO38.2(+7.6)45.4(+11.6)33.1(+5.2)72.6(+1.9)
", + "bbox": [ + 308, + 465, + 696, + 558 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/e28f9a49ff6de42978fa86862a63a8825cbaa5600ce4964dad48ddb70c43d31e.jpg", + "table_caption": [ + "Table 6: Results of Video Caption and Video Quality Access." + ], + "table_footnote": [], + "table_body": "
MethodLLM JudgeDream-1k
F1PrecisionRecall
Baseline-30.633.827.9
+ GRPOGPT-3.5-turbo-0125 Qwen2.5-72B37.9(+7.3)44.4(+10.6)33(+5.1)
38.2(+7.6)45.4(+11.6)33.1(+5.2)
", + "bbox": [ + 308, + 599, + 696, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 7: Ablation of Video Caption Task.", + "bbox": [ + 354, + 689, + 640, + 704 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Video Caption and Video Quality Assessment. For the Video Caption and Video Quality Assessment tasks, we found that GRPO still demonstrated its advantages over SFT, As shown in Table 6. The significant metric improvements on these two benchmarks demonstrate the effectiveness of our approach.", + "bbox": [ + 169, + 729, + 826, + 786 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Ablation of Reward Evaluators To assess the impact of different large language models (LLMs) as reward evaluators, we conducted parallel experiments using GPT-3.5-turbo-0125 and Qwen2.5-72B as distinct judges (Table 7). Models trained under both evaluators achieved nearly identical performance, demonstrating consistent caption reward generation across LLMs. We attribute this consistency to GRPO's fundamental mechanism: it relies on relative differential scoring within response groups rather than absolute reward values. This confirms that $R_{recall}$ produces discriminative reward signals for predicted captions independent of the choice of LLM judge, validating both the efficacy of our reward design and the stability of its signaling mechanism. 
Crucially, when guided by these reliable", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "reward signals, our approach delivers substantial performance gains in description tasks using only limited high-quality data, demonstrating remarkable data-efficiency and significant optimization potential.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/f09d73a22ceb8cfbd8381414bc174876dd7505bc28c0d0c76d6535620480222f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model\nAvg. DurationClue\nPerceptionVideoMME\n1010sLongVideoBench\n473s
Qwen2.5-VL-7B64.456.0
63.3(-1.1)55.2(-0.8)
VideoChat-R1-thinking62.151.9
63.6(+1.5)58.2(+6.3)
", + "bbox": [ + 254, + 148, + 743, + 253 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 8: Ablation results of \"Clue-Perception\". It should be noted that due to our adoption of a lower number of input pixels, the absolute performance is not entirely consistent with that reported for Qwen2.5-VL.", + "bbox": [ + 169, + 253, + 823, + 297 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ablation of \"Clue-Perception\" As shown in Table. 8, we compared the performance changes of the model with and without perception enhancement when applying the \"Clue-Perception\" strategy on two representative long video benchmarks [6, 30]. It is noteworthy that without the use of \"Clue-Perception\", VideoChat-R1 showed no significant performance improvement over Qwen2.5-VL-7B in long video tasks, which can be attributed to the fact that our training dataset consists entirely of short videos under 1 minute. However, after the application of the \"Clue-Perception\" operation, VideoChat-R1 demonstrated a significant performance enhancement, indicating that the clues it provides are more accurate and thus revealing the potential of clue-perception in long video understanding. In contrast, due to its insufficient spatiotemporal perception capability, Qwen2.5-VL-7B even exhibited a performance decline after the implementation of the \"Clue-Perception\" operation.", + "bbox": [ + 169, + 325, + 826, + 465 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "GRPO vs. SFT. It can be observed that across various types of tasks, GRPO outperforms SFT. Whether it is in terms of the performance on in-domain tasks, out-domain tasks, or the preservation of the original general performance, our experimental results demonstrate that GRPO is a promising fine-tuning approach. We will leave the large-scale comparison for future research.", + "bbox": [ + 169, + 479, + 823, + 536 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Chain-of-thought vs. 
Direct Output. Based on the video tasks and experiments we have explored, which focus on spatiotemporal perception, the output of the chain of thought has not demonstrated obvious advantages. In some cases, it is even inferior to the direct output. We believe that how to define appropriate video reasoning tasks and evaluation methods remains to be explored. The existing training data is insufficient to activate the model to output truly effective video reasoning chains.", + "bbox": [ + 169, + 551, + 823, + 622 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Video:", + "text_level": 1, + "bbox": [ + 222, + 681, + 261, + 693 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/14772caaea820bce8abfff3fb25fe055a3d3f489303d7e859376b1f66919cc82.jpg", + "image_caption": [ + "Query: A person puts on a pair of shoes." + ], + "image_footnote": [], + "bbox": [ + 267, + 646, + 325, + 728 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dc2e7f5b87eda42235292244baad30099c0260240ad371faa2c3a5a56e9b7c19.jpg", + "image_caption": [ + "Figure 2: Examples on temporal grounding task. VideoChat-R1 gives a more accurate time interval after thinking." 
+ ], + "image_footnote": [], + "bbox": [ + 331, + 646, + 387, + 728 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/8b7f6b3d5a683c9e85bc2a6d1964ab2ba421b06778bf9d9a77bfa254bc48d01d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 393, + 646, + 450, + 728 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4c1eda96c7469788fc0f4eed659c9324e2340ecd108e41f3e47883d9abc2b1d2.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 455, + 646, + 513, + 728 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d4d21b6c2f0500b2d1c3036da4a9e1bf736b322c6df991e43cddd7bcba3cce63.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 519, + 646, + 575, + 728 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/3becb2134d2fda7594ca6af18f1f2dab9032a5d21f1b230980bb26d2653d10ee.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 580, + 646, + 637, + 728 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/065345b3bee4b365c92af6880c7186140669d936b910a2cc42e2993ef88b5aea.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 642, + 646, + 699, + 728 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/eaad3848c3f2cbd11d2d60de002994c397acb95d6539b0e3b8f9c88788af4071.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 704, + 646, + 761, + 728 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Ground Truth:", + "bbox": [ + 222, + 750, + 310, + 761 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "1.8s< 9.4s", + "bbox": [ + 333, + 750, + 519, + 761 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "SFT:", + "bbox": [ + 222, + 767, + 253, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "1.3s←---->4.1s", + "bbox": [ + 272, + 767, + 405, + 777 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "VideoChat-R1:", + "bbox": [ + 222, + 789, + 310, + 801 + ], 
+ "page_idx": 7 + }, + { + "type": "text", + "text": "2.0s 10.7s", + "bbox": [ + 326, + 789, + 550, + 801 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "\\nThe man is seen standing in front of a pantry, looking down at something on the floor. He then bends down and picks up a pair of shoes from the floor. He then proceeds to put on the shoes. The event $\"a person puts on a pair of shoes\"$ occurs during this time. $\\backslash n < /$ think>\\n< timestep>\\nThe event happens in 2.00 - 10.7 seconds.\\n\\n< answer>\\n2.00 to 10.67\\n", + "bbox": [ + 220, + 803, + 751, + 852 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/487489ae1c1cc58b30c1bf03f7b482e23ff422fab2c58178909c5e2e21ee197f.jpg", + "image_caption": [ + "Video:" + ], + "image_footnote": [], + "bbox": [ + 267, + 99, + 341, + 142 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1ec648e71eaf0421911e1ef7520b793de89f5ab05562652eaaa335fc6fc413bc.jpg", + "image_caption": [ + "Figure 3: Examples on Video QA task. It can be seen that VideoChat-R1 can not only answer questions correctly but also provide relatively accurate reference time periods (clue)." 
+ ], + "image_footnote": [], + "bbox": [ + 349, + 99, + 424, + 142 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/9ff70423f7048b551160a4d558cb496609c136f669af2e980186a28f8919de18.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 437, + 99, + 509, + 142 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4111b6c57103162103af289b7ae203f72e671c4a04ad2c215840b87e546c66ba.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 521, + 99, + 596, + 142 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/77431af26a587d74d78cf548fe934d2e4ac4879c9f8cd07d8ff92b3a6eaf09ee.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 607, + 99, + 681, + 142 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/348f3183f84c293ceb77e665d8f8e8125828222eb63fc005e492dbd23b5b10b7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 691, + 99, + 766, + 142 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Question: Why did the girl suddenly jump up from where she was standing?", + "bbox": [ + 225, + 143, + 629, + 157 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Ground Truth: Excited.", + "bbox": [ + 225, + 164, + 377, + 175 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "[20.9s, 25.0s]", + "bbox": [ + 439, + 162, + 511, + 175 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "E--->", + "bbox": [ + 619, + 162, + 673, + 174 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "SFT: Dance.", + "bbox": [ + 227, + 183, + 370, + 194 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "[19.4s, 37.2s]", + "bbox": [ + 439, + 183, + 511, + 194 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "k--", + "bbox": [ + 616, + 183, + 669, + 194 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "---→", + "bbox": [ + 692, + 183, + 725, + 194 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": 
"VideoChat-R1: Excited.", + "bbox": [ + 225, + 205, + 377, + 217 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "[20.1s, 28.0s]", + "bbox": [ + 439, + 205, + 511, + 215 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "<- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -", + "bbox": [ + 617, + 205, + 687, + 215 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": " The girl jumps up from her position after playing with the rocket toy and then walks around the room holding it. This suggests that she is excited about the toy and wants to show it off. The jump could be interpreted as an expression of excitement. ", + "bbox": [ + 225, + 218, + 759, + 253 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "E[(20.1,28.0)]", + "bbox": [ + 225, + 255, + 493, + 267 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3 Qualitative Results", + "text_level": 1, + "bbox": [ + 171, + 340, + 346, + 356 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "As shown in Figure 2 and 3, we provide visualizations of VideoChat-R1's outputs for the temporal grounding and video QA tasks. We observe that due to the enhanced spatiotemporal perception capability, VideoChat-R1 can more accurately locate video segments relevant to the question and conduct reasonable reasoning to answer the question. However, compared with the complex chains of thought involved in solving mathematical problems or coding tasks in text and image domains, the chains of thought generated in our current video tasks are relatively simple. 
We will explore more complex video reasoning tasks in future research.", + "bbox": [ + 169, + 366, + 823, + 464 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusions", + "text_level": 1, + "bbox": [ + 171, + 483, + 307, + 498 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we systematically investigate the role of reinforcement fine-tuning (RFT) with Group Relative Policy Optimization (GRPO) in enhancing video-centric multimodal large language models (MLLMs). Our experiments demonstrate that RFT is a highly data-efficient paradigm for task-specific improvements, enabling VideoChat-R1—a model trained with limited samples via multi-task RFT—to achieve state-of-the-art performance on spatio-temporal perception tasks while preserving general chat capabilities and exhibiting emergent spatiotemporal reasoning. We believe our work can present relevant insights for future research efforts in reinforcement learning of video MLLMs.", + "bbox": [ + 169, + 513, + 826, + 612 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025.", + "[2] Zhuo Cao, Bingqing Zhang, Heming Du, Xin Yu, Xue Li, and Sen Wang. Flashvtg: Feature layering and adaptive score handling network for video temporal grounding. arXiv preprint arXiv:2412.13441, 2024.", + "[3] Huilin Deng, Ding Zou, Rui Ma, Hongchen Luo, Yang Cao, and Yu Kang. Boosting the generalization and reasoning of vision language models with curriculum reinforcement learning. 
arXiv preprint arXiv:2503.07065, 2025.", + "[4] Yihe Deng, Hritik Bansal, Fan Yin, Nanyun Peng, Wei Wang, and Kai-Wei Chang. Openthinker: An early exploration to complex vision-language reasoning via iterative self-improvement. arXiv preprint arXiv:2503.17352, 2025.", + "[5] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025.", + "[6] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024.", + "[7] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017.", + "[8] Aleksandr Gordeev, Vladimir Dokholyan, Irina Tolstykh, and Maksim Kuprashevich. Saliency-guided detr for moment retrieval and highlight detection. arXiv preprint arXiv:2410.01615, 2024.", + "[9] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[10] Lianghua Huang, Xin Zhao, and Kaiqi Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. IEEE transactions on pattern analysis and machine intelligence, 43(5): 1562–1577, 2019.", + "[11] Zhenpeng Huang, Xinhao Li, Jiaqi Li, Jing Wang, Xiangyu Zeng, Cheng Liang, Tao Wu, Xi Chen, Liang Li, and Limin Wang. Online video understanding: A comprehensive benchmark and memory-augmented method. 
arXiv preprint arXiv:2501.00584, 2024.", + "[12] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024.", + "[13] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017.", + "[14] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023.", + "[15] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mybench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024.", + "[16] Xinhao Li, Zhenpeng Huang, Jing Wang, Kunchang Li, and Limin Wang. Videoeval: Comprehensive benchmark suite for low-cost evaluation of video foundation model. arXiv preprint arXiv:2407.06491, 2024.", + "[17] Xinhao Li, Yi Wang, Jiashuo Yu, Xiangyu Zeng, Yuhan Zhu, Haian Huang, Jianfei Gao, Kunchang Li, Yinan He, Chenting Wang, et al. Videochat-flash: Hierarchical compression for long-context video modeling. arXiv preprint arXiv:2501.00574, 2024.", + "[18] Yuqi Liu, Bohao Peng, Zhisheng Zhong, Zihao Yue, Fanbin Lu, Bei Yu, and Jiaya Jia. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. arXiv preprint arXiv:2503.06520, 2025.", + "[19] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." 
+ ], + "bbox": [ + 173, + 112, + 826, + 911 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023.", + "[21] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36: 42748-42761, 2023.", + "[22] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based r1. arXiv preprint arXiv:2503.07536, 2025.", + "[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.", + "[25] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024.", + "[26] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. 
arXiv preprint arXiv:2409.12191, 2024.", + "[27] Ye Wang, Boshen Xu, Zihao Yue, Zihan Xiao, Ziheng Wang, Liang Zhang, Dingyi Yang, Wenxuan Wang, and Qin Jin. Timezero: Temporal video grounding with reasoning-guided lvm. arXiv preprint arXiv:2503.13377, 2025.", + "[28] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, et al. Internvideo2: Scaling foundation models for multimodal video understanding. In European Conference on Computer Vision, pages 396-416. Springer, 2024.", + "[29] Yi Wang, Xinhao Li, Ziang Yan, Yinan He, Jiashuo Yu, Xiangyu Zeng, Chenting Wang, Changlian Ma, Haian Huang, Jianfei Gao, et al. Internvideo2.5: Empowering video mllms with long and rich context modeling. arXiv preprint arXiv:2501.12386, 2025.", + "[30] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37: 28828-28857, 2024.", + "[31] Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13204-13214, 2024.", + "[32] Yifan Xu, Xinhao Li, Yichun Yang, Rui Huang, and Limin Wang. Fine-grained video-text retrieval: A new benchmark and method. arXiv preprint arXiv:2501.00513, 2024.", + "[33] Ziang Yan, Zhilin Li, Yinan He, Chenting Wang, Kunchang Li, Xinhao Li, Xiangyu Zeng, Zilei Wang, Yali Wang, Yu Qiao, et al. Task preference optimization: Improving multimodal large language models with vision task alignment. arXiv preprint arXiv:2412.19326, 2024.", + "[34] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. 
arXiv preprint arXiv:2412.15115, 2024.", + "[35] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, et al. R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025.", + "[36] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. In European Conference on Computer Vision, pages 425-443. Springer, 2024.", + "[37] Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, et al. Timesuite: Improving mllms for long video understanding via grounded tuning. arXiv preprint arXiv:2410.19702, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[38] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025.", + "[39] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025.", + "[40] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. R1-vl: Learning to reason with multimodal large language models via step-wise group relative policy optimization. arXiv preprint arXiv:2503.12937, 2025.", + "[41] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. 
arXiv preprint arXiv:2410.02713, 2024.", + "[42] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025.", + "[43] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025." + ], + "bbox": [ + 173, + 90, + 825, + 333 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + } +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_model.json b/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_model.json new file mode 100644 index 0000000000000000000000000000000000000000..268708d1273172734b4650a360637ffcbc1bca6d --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_model.json @@ -0,0 +1,2171 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.275, + 0.058, + 0.723 + ], + "angle": 270, + "content": "arXiv:2504.06958v5 [cs.CV] 11 Nov 2025" + }, + { + "type": "title", + "bbox": [ + 0.233, + 0.123, + 0.768, + 0.175 + ], + "angle": 0, + "content": "VideoChat-R1: Enhancing Spatio-Temporal Perception via Reinforcement Fine-Tuning" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.224, + 0.8, + 0.256 + ], + "angle": 0, + "content": "Xinhao Li\\(^{2,1*}\\), Ziang Yan\\(^{3,1*}\\), Desen Meng\\(^{2*}\\), Lu Dong\\(^{4,1}\\), Xiangyu Zeng\\(^{2,1}\\), Yinan He\\(^{1}\\), Yali Wang\\(^{6,1}\\), Yu Qiao\\(^{1,6}\\), Yi Wang\\(^{\\dagger 1,5}\\), Limin Wang\\(^{\\dagger 2,1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.257, + 0.256, + 0.741, + 0.283 + ], + "angle": 0, + "content": "\\(^{1}\\)Shanghai AI Laboratory \\(^{2}\\)Nanjing University \\(^{3}\\)Zhejiang University \\(^{4}\\)University of Science and Technology of China 
\\(^{5}\\)Shanghai Innovation Institute" + }, + { + "type": "text", + "bbox": [ + 0.268, + 0.283, + 0.729, + 0.31 + ], + "angle": 0, + "content": "Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences https://github.com/OpenGVLab/VideoChat-R1" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.347, + 0.538, + 0.362 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.377, + 0.769, + 0.599 + ], + "angle": 0, + "content": "Reinforcement Learning (RL) benefits Large Language Models (LLMs) for complex reasoning. Inspired by this, we explore integrating spatio-temporal specific rewards into Multimodal Large Language Models (MLLMs) to address the unique challenges of video understanding, such as long-range temporal associations. This paper investigates how rule-based rewards, particularly temporal ones, can improve video reasoning and their generalizability. Our study proposes Reinforcement Fine-Tuning (RFT) as a data-efficient method to enhance video reasoning on specific tasks without sacrificing original capabilities. Through joint RFT on multiple spatio-temporal perception tasks, we developed VideoChat-R1, a powerful Video MLLM. VideoChat-R1 achieves state-of-the-art spatio-temporal perception, demonstrating significant improvements in tasks like temporal grounding (+31.8) and object tracking (+31.2), while also improving general QA benchmarks. The enhanced perception and preserved chat abilities contribute to a more reliable video dialogue system, leading to our \"Temporal Clue-driven Reasoning\" inference schema. This work provides a foundation for developing robust, real-world video comprehension agents." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.622, + 0.314, + 0.637 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.652, + 0.828, + 0.752 + ], + "angle": 0, + "content": "The integration of reinforcement learning (RL) has notably propelled the capabilities of large language models (LLMs) forward, particularly in complex reasoning. Studies, such as o-series from OpenAI [12] and R1 of DeepSeek [9], demonstrated that test-time scaling improves model reasoning and such reasoning is elicit-able via only rule-based rewarding. Inspired by these advances, community is interested in tuning multimodal large language models (MLLMs) with vision-related verifications in Group Relative Policy Optimization (GRPO) [9] manner [43, 18, 38, 3, 22, 19, 35, 40, 39, 4]. They primarily consider visual mathematical reasoning and spatial localization." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.756, + 0.828, + 0.882 + ], + "angle": 0, + "content": "Regarding video understanding, reasoning is also crucial as some of its core abilities (e.g. its long-range temporal association, fine-grained spatiotemporal understanding based on user queries) are barely addressed via perception or simple analysis. Initial works [42, 27, 5] have validated the superiority of the GRPO algorithm over supervised fine-tuning in some specific video tasks, such as temporal grounding and video question answer. Considering advancing the agent development with real-world video comprehension, we need to investigate and integrate spatio-temporal specific rewards or verification mechanisms directly into the learning of current MLLMs in a scalable manner. This could foster a more tightly coupled, near-closed-loop learning environment, enabling MLLMs to master intricate spatio-temporal skills. 
While our current work may not fully achieve this ambitious" + }, + { + "type": "page_footnote", + "bbox": [ + 0.198, + 0.888, + 0.48, + 0.904 + ], + "angle": 0, + "content": "* Equal contribution. † Corresponding authors." + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.316, + 0.938 + ], + "angle": 0, + "content": "Preprint. Under review." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.094, + 0.818, + 0.291 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.304, + 0.825, + 0.348 + ], + "angle": 0, + "content": "Figure 1: Overview of VideoChat-R1. Through reinforcement learning fine-tuning using GRPO, VideoChat-R1 has powerful spatio-temporal perception capabilities and can apply these capabilities in chatting scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.387, + 0.825, + 0.445 + ], + "angle": 0, + "content": "goal, it endeavors to lay foundational groundwork for the community by exploring how to embed such mechanisms effectively. Specifically, we in this paper chart the landscape of how rule-based rewards (especially temporal related ones) work in video understanding, along with systematic evaluations of the its generalizability across diverse video-based reasoning scenarios." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.449, + 0.825, + 0.546 + ], + "angle": 0, + "content": "Our study begins with spatio-temporal perception, extending to other popular video tasks (e.g. video caption), forming a practical recipe to enhance video reasoning via proper Reinforcement Fine-Tuning (RFT). We evaluate the robustness of various video verifications through different training and testing setting, also benchmarking their respective data requirements. Moreover, how these rewards interact with each other are also systematically explored. Based on these experiences, we give an intuitive and effective inference schema named \"Temporal Clue-driven Reasoning\". 
Generally, our main findings are as follows." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.57, + 0.825, + 0.641 + ], + "angle": 0, + "content": "- Reinforcement fine-tuning is data-efficient for enhancing models on specific tasks without sacrificing original capabilities. With a small amount of data, training via RFT can yield a remarkable improvement in spatio-temporal perception ability, and there is negligible impact on the performance of out-domain tasks and the original general capabilities of the model, which outperforms traditional supervised fine-tuning significantly." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.658, + 0.827, + 0.783 + ], + "angle": 0, + "content": "- Through joint reinforcement fine-tuning on multiple spatio-temporal perception tasks, we construct VideoChat-R1, a powerful Video MLLM that boasts state-of-the-art spatiotemporal perception capabilities while also taking into account chat abilities. We have also discovered that training on spatio-temporal perception tasks has slightly strengthened the model's spatio-temporal reasoning abilities. Compared with Qwen2.5-VL-7B, VideoChat-R1 achieves several times the performance improvement in spatiotemporal perception tasks such as temporal grounding (+31.8) and object track (+31.2). At the same time, it also achieves non-trivial improvements on general QA benchmarks, such as VideoMME (+0.9), MVBench (+1.0), and Perception Test (+0.9)" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "- The improvement of spatio-temporal perception ability and the preservation of the original chat capability can contribute to a more reliable and efficient video dialogue system. Our VideoChat-R1 can provide reference video segments when answering users' questions. Meanwhile, we propose to utilize these video segments for \"Temporal Clue-driven Reasoning\" to further obtain more accurate answers. 
Our experimental results reveal the potential of the approach that enhances the model's spatiotemporal perception ability through reinforcement learning for future research in the directions of reliable video dialogue systems and long video understanding." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.57, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.32, + 0.106 + ], + "angle": 0, + "content": "2 Related work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.122, + 0.827, + 0.262 + ], + "angle": 0, + "content": "Reasoning in MLLMs. The field LLMs has witnessed significant breakthroughs in reasoning, exemplified by recent works [12, 9]. These developments [23, 9, 24] have markedly improved LLMs' proficiency in tackling complex, multi-step tasks, such as challenging mathematical problems and coding exercises. Extending these RL techniques to multimodal large language models (MLLMs) has seen numerous efforts [43, 18, 38, 3, 22, 19, 35, 40, 39, 4] focused on leveraging verifiable reward mechanisms to boost visual reasoning performance. However, the application of RL-based strategies to spatio-temporal reasoning within the video domain remains comparatively less explored, with a few studies [27, 42, 5] investigating this area. [27] and [42] show GRPO's potential in temporal grounding and sentiment analysis, while, [5] demonstrates how GRPO can facilitate temporal reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.278, + 0.828, + 0.378 + ], + "angle": 0, + "content": "Spatio-Temporal Understanding with MLLMs. Video understanding heavily relies on spatiotemporal perception. 
Progress in Video Multimodal Large Language Models (video MLLMs) [14, 20, 15, 28, 41, 29, 17, 11, 1] addresses video question answering and captioning in the unfed dialogue format, leveraging LLMs to organize predictions over visual perceptions. [36] and [37] leverage data augmentation for temporal modeling, yet compromising their general performance. [33] improves fine-grained spatio-temporal perception by task preference optimization with solid supervised fine-tuning using a lot of data." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.397, + 0.317, + 0.415 + ], + "angle": 0, + "content": "3 Methodology" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.825, + 0.46 + ], + "angle": 0, + "content": "We present how to exploit the spatio-temporal rewards to improve MLLMs in video domains using GRPO. Before that, we revisit the Group Relative Policy Optimization (GRPO) [23] first." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.477, + 0.298, + 0.493 + ], + "angle": 0, + "content": "3.1 Preliminary" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.504, + 0.826, + 0.588 + ], + "angle": 0, + "content": "Group Relative Policy Optimization (GRPO) [23] compares groups of candidates responses directly, independent on critic models. Regarding this, it significantly lowers training resources. Formally, with an input query \\( q \\), GRPO initiates by generating a set of \\( G \\) distinct candidate responses, denoted as \\( o = \\{o_1, \\dots, o_G\\} \\), via policy sampling. Each of these candidates is then assigned a score from a predefined reward function, yielding \\( R_1, \\dots, R_G \\). 
To normalize these scores and ascertain the relative quality of each response, GRPO computes their mean and standard deviation:" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.597, + 0.826, + 0.632 + ], + "angle": 0, + "content": "\\[\nA _ {i} = \\frac {R _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}. \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.825, + 0.71 + ], + "angle": 0, + "content": "Here \\(A_{i}\\) quantifies the relative quality of the \\(i\\)-th candidate response within its group. The objective of GRPO is to encourage the model to prioritize and generate responses that achieve higher relative scores within such groups. Furthermore, the overall training objective incorporates a KL-divergence term \\(\\mathrm{D}_{\\mathrm{KL}}(\\cdot |\\cdot)\\), which serves to constrain the optimized policy \\(\\pi_{\\theta}\\) from diverging excessively from the original MLLM parameters \\(\\pi_{\\mathrm{ref}}\\)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.729, + 0.585, + 0.744 + ], + "angle": 0, + "content": "3.2 Spatio-Temporal Rewards of Video MLLM in GRPO" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.755, + 0.827, + 0.799 + ], + "angle": 0, + "content": "We explore how to use GRPO to enhance the performance of Video MLLM in video-language understanding. We consider the five most common types of video related tasks: temporal grounding, object tracking, video question answering, captioning, and quality assessment in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.814, + 0.825, + 0.871 + ], + "angle": 0, + "content": "Format reward. To enable the model to output responses in the format we desire. For example, we expect the model to enclose its thought process with ... and the answer with ..., we designed a format reward \\( R_{\\mathrm{format}} \\) for each task. 
We use regular expression matching to determine whether the model adheres to our specified format:" + }, + { + "type": "equation", + "bbox": [ + 0.335, + 0.882, + 0.826, + 0.915 + ], + "angle": 0, + "content": "\\[\nR _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f o u t p u t m a t c h e s f o r m a t}, \\\\ 0, & \\text {i f o u t p u t d o e s n ’ t m a t c h f o r m a t}. \\end{array} \\right. \\tag {2}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "IoU reward in spatio-temporal perception. For the spatio-temporal perception such as temporal grounding and object tracking, it requires the Video MLLM to output the time interval in the video that is associated with the content of a given textual query. Evidently, we can use the Intersection over Union (IoU) between the predicted interval by the model and the ground-truth interval as the reward function. This reward function effectively characterizes the accuracy of the interval predicted by the model." + }, + { + "type": "equation", + "bbox": [ + 0.423, + 0.183, + 0.826, + 0.217 + ], + "angle": 0, + "content": "\\[\nR _ {\\mathrm {I o U}} = \\frac {\\left| \\mathcal {I} _ {\\text {p r e d}} \\cap \\mathcal {I} _ {\\mathrm {g t}} \\right|}{\\left| \\mathcal {I} _ {\\text {p r e d}} \\cup \\mathcal {I} _ {\\mathrm {g t}} \\right|}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.227, + 0.827, + 0.256 + ], + "angle": 0, + "content": "where \\(\\mathcal{I}_{\\mathrm{pred}}\\) and \\(\\mathcal{I}_{\\mathrm{gt}}\\) are the predicted and the ground truth of time intervals or detection boxes, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.277, + 0.825, + 0.32 + ], + "angle": 0, + "content": "Accuracy reward in classification. 
Discriminative tasks, such as multiple-choice video question answering and classification, aim to determine whether the model's prediction is consistent with the answer to the question. Therefore, we define:" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.334, + 0.826, + 0.368 + ], + "angle": 0, + "content": "\\[\nR _ {\\text {a c c u r a c y}} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} A _ {\\text {p r e d}} \\neq A _ {\\text {g t}} \\\\ 1, & \\text {i f} A _ {\\text {p r e d}} = A _ {\\text {g t}}, \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.38, + 0.734, + 0.396 + ], + "angle": 0, + "content": "where \\( A_{\\mathrm{pred}} \\) and \\( A_{\\mathrm{gt}} \\) denote the predicted and the ground truth answers, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.416, + 0.825, + 0.541 + ], + "angle": 0, + "content": "Recall reward in video captioning. For tasks like video captioning with open-ended outputs, it is impossible to simply compare and determine the gap between the generated caption and the ground truth caption. Therefore, we use a LLM as a \"judge\" to provide a reward score. In order to reduce the uncertainty of the evaluation criteria for the LLM, we first make the LLM decompose the ground truth and predicted captions into events list. Specifically, we utilize Qwen2.5-72B [34] to extract the events in the description and judge whether the events in a ground truth description can be entailed by the description predicted by the model. 
We calculate the event recall score as the ratio of events in a ground truth description that are entailed by the predicted description, and set different rewards according to the event recall score:" + }, + { + "type": "equation", + "bbox": [ + 0.383, + 0.555, + 0.826, + 0.571 + ], + "angle": 0, + "content": "\\[\nR _ {\\text {r e c a l l}} = \\operatorname {R e c a l l} _ {\\text {e v e n t}} \\left(C _ {\\text {p r e d}}, C _ {\\text {g t}}\\right), \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.583, + 0.75, + 0.599 + ], + "angle": 0, + "content": "where \\( C_{\\mathrm{pred}} \\) and \\( C_{\\mathrm{gt}} \\) represent the predicted and the ground truth captions, respectively." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.827, + 0.632 + ], + "angle": 0, + "content": "By combining the above reward functions, we explored how to utilize GRPO to enhance the performance of Video MLLM in various tasks. The specific details can be found in the Section 4." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.655, + 0.704, + 0.67 + ], + "angle": 0, + "content": "3.3 Enhance Spatio-Temporal Perception of Video MLLM through GRPO" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.682, + 0.827, + 0.755 + ], + "angle": 0, + "content": "The combination of reward functions. We adopt different combinations of reward functions for training in different tasks. Specifically, for the temporal grounding and object tracking task, \\( R_{\\mathrm{st}} = R_{\\mathrm{format}} + R_{\\mathrm{IoU}} \\). For the multi-choice QA and video quality assessment, \\( R_{\\mathrm{qa}} = R_{\\mathrm{format}} + R_{\\mathrm{accuracy}} \\). For the multi-choice QA with glue (e.g. Grounding QA), \\( R_{\\mathrm{gqa}} = R_{\\mathrm{format}} + R_{\\mathrm{IoU}} + R_{\\mathrm{Acc}} \\). For the video caption, \\( R_{\\mathrm{cap}} = R_{\\mathrm{format}} + R_{\\mathrm{Caption}} \\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.827, + 0.913 + ], + "angle": 0, + "content": "VideoChat-R1-18k. We collect diverse video corpus from existing public video datasets for reinforcement learning training to enhance the model's spatiotemporal perception ability. For the temporal grounding task, we use the training set of Charades-STA [7] (5,338 samples) for training. For the object tracking task, training is conducted on the GoT-10k [10] dataset, which has 9,335 samples. For the QA and grounding QA tasks, the validation set of NExTGQA [31] (3,358 samples) is used for training. For video captioning, FIBER-1k [32] (1,000 samples) is adopted for training. For video quality assessment, we use the quality assessment task from VidTAB [16] under the 100-shot setting, with 200 samples for training. Finally, for the training of VideoChat-R1, we perform joint training on three spatio-temporal perception-related tasks: temporal grounding, object tracking, and grounding QA. In total, 18,031 samples are used for training." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.095, + 0.521, + 0.11 + ], + "angle": 0, + "content": "Algorithm 1: VideoChat \\(\\mathcal{M}\\) with \"Clue-Perception\"" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.113, + 0.437, + 0.127 + ], + "angle": 0, + "content": "Input: \\( V_{\\mathrm{low}} \\): Low-resolution/low-fps vi" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.128, + 0.288, + 0.141 + ], + "angle": 0, + "content": "\\(Q\\) :User question" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.141, + 0.365, + 0.154 + ], + "angle": 0, + "content": "\\(\\Delta_{\\mathrm{res}}\\) : Resolution boost factor" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.155, + 0.365, + 0.169 + ], + "angle": 0, + "content": "\\(\\Delta_{\\mathrm{fps}}\\) : Frame rate boost factor" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.17, + 0.411, + 0.185 + ], + "angle": 0, + "content": "Output: \\(A^{(2)}\\): Final refined answer" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.19, + 0.365, + 0.203 + ], + "angle": 0, + "content": "Initial Answer Generation:" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.203, + 0.338, + 0.219 + ], + "angle": 0, + "content": "\\((A^{(1)},\\mathcal{C})\\gets \\mathcal{M}(V_{\\mathrm{low}},Q)\\)" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.223, + 0.399, + 0.238 + ], + "angle": 0, + "content": "Clue Processing & Upsampling:" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.238, + 0.411, + 0.251 + ], + "angle": 0, + "content": "\\(\\mathcal{T}_c \\gets\\) ExtractTemporalClues \\((\\mathcal{C})\\)" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.252, + 0.418, + 0.265 + ], + "angle": 0, + "content": "\\(V_{\\mathrm{seg}} \\gets \\mathrm{SelectSegments}(V_{\\mathrm{low}}, \\mathcal{T}_c)\\)" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.265, + 0.406, + 0.28 + ], + "angle": 
0, + "content": "\\(V_{\\mathrm{high}} \\gets\\) Upsample \\((V_{\\mathrm{seg}}, \\Delta_{\\mathrm{res}}, \\Delta_{\\mathrm{fps}})\\)" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.284, + 0.358, + 0.297 + ], + "angle": 0, + "content": "Final Answer Generation:" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.297, + 0.314, + 0.313 + ], + "angle": 0, + "content": "\\(A^{(2)}\\gets \\mathcal{M}(V_{\\mathrm{high}},Q)\\)" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.318, + 0.254, + 0.333 + ], + "angle": 0, + "content": "return \\(A^{(2)}\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.356, + 0.828, + 0.482 + ], + "angle": 0, + "content": "Chat with Enhanced Spatio-Temporal Perception. After enhancing the spatiotemporal perception capability of MLLMs, we can construct a more reliable video chat system. Specifically, after the model answers a user's question, it can provide relatively accurate clues that support the answer to that question. We can further leverage these clues to improve the accuracy of the responses. Herein, we propose a simple \"Temporal Clue-driven Reasoning\" schema: after the model gives the first answer, we re-input the video segments corresponding to the obtained clues into the model at a higher resolution and frame rate, prompting it to answer again. By perceiving more details, the model can generate more accurate responses. Additionally, this operation is also well-suited for long video understanding under conditions of limited computing resources." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.504, + 0.314, + 0.522 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.538, + 0.825, + 0.567 + ], + "angle": 0, + "content": "Implementation details. The main experiments are all conducted based on Qwen2.5-VL-7B [1] (except for the video captioning, for which Qwen2-VL-7B [26] is used)." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.584, + 0.828, + 0.683 + ], + "angle": 0, + "content": "**Benchmarks.** We employ MVBench [15], Perception Test [21], VideoMME [6] for evaluation of general video understanding. Given that the majority of videos in our training set are short-length, we only use the short subset of VideoMME in testing. For the temporal grounding task, we use the test set of Charade-STA [7] for in-domain testing and the test set of ActivityNet-Grounding [13] as out-domain test data. For the object tracking task, testing is done using the GoT-10k [10] dataset. For the QA and grounding QA tasks, the test set of NExTGQA [31] is used for testing. And we use Dream-1k [25] and VidTAB-QA [16] for the video captioning and video quality access." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.702, + 0.411, + 0.716 + ], + "angle": 0, + "content": "4.1 Evaluation of VideoChat-R1" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.736, + 0.825, + 0.858 + ], + "angle": 0, + "content": "
MethodCharades-STAActivityNetNExTGQAGoTVideoMMEMVBenchPeception Test
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-AvgAvgVal
Baseline
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.366.969.1
SFT on specific tasks
+SFT w/ Charades-STA46.345.025.320.616.77.9----N/A*N/A*N/A*
+SFT w/ GoT--------41.829.559.258.658.5
+SFT w/ NExTGQA------28.264.8--60.159.260.7
GRPO on various tasks
VideoChat-R160.871.750.236.633.417.732.470.643.838.272.267.970.0
VideoChat-R1-thinking59.970.647.235.533.316.736.169.243.333.974.266.269.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.859, + 0.825, + 0.916 + ], + "angle": 0, + "content": "Table 1: Results of VideoChat-R1 on various Video Benchmarks. * indicates that the model has suffered from overfitting and is unable to answer the question properly. Since the number of input pixels is fixed during our evaluation, the baseline results are slightly lower than those reported in their origin paper [1]." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "As shown in Table 1, after training with GRPO on spatio-temporal perception datasets, both VideoChat-R1 and VideoChat-R1-thinking significantly outperform the performance of Qwen2.5-VL and that of models fine-tuned through SFT for a single specific task across various spatiotemporal perception benchmarks and the general understanding benchmark VideoMME. This validates the effectiveness of our approach, which leverages multiple spatiotemporal perception datasets and RFT for enhancing spatiotemporal perception." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.182, + 0.827, + 0.239 + ], + "angle": 0, + "content": "Meanwhile, we observe that for spatio-temporal perception tasks, engaging in thinking processes does not necessarily lead to performance gains. However, for tasks such as QA and VideoMME, which may require complex reasoning, conducting inferences during testing can result in notable performance improvements." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.257, + 0.445, + 0.271 + ], + "angle": 0, + "content": "4.2 Ablation Studies and Discussions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.283, + 0.828, + 0.34 + ], + "angle": 0, + "content": "Muti-task Co-training. 
As shown in Table 2, we found that mixed training of different spatiotemporal perception tasks using GRPO can yield a synergistic improvement effect. Training with the multiple tasks achieves nearly the best results across all benchmarks. This reveals the potential of GRPO for larger-scale and multi-task collaborative training in the future." + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.355, + 0.825, + 0.458 + ], + "angle": 0, + "content": "
MethodCharades-STAANetNExTGQAGoTVideoMME
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-Avg
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.3
+GRPO w/ STA59.370.446.030.727.512.931.461.227.812.972.6
+GRPO w/GQA36.033.515.524.920.610.735.168.736.126.772.0
+GRPO w/ GoT28.725.19.620.116.26.815.660.542.530.671.4
+GRPO w/ STA-GQA59.869.747.033.731.016.035.767.736.528.972.2
+GRPO w/ STA-GQA-GoT60.871.750.236.633.417.732.470.643.838.272.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.267, + 0.46, + 0.729, + 0.474 + ], + "angle": 0, + "content": "Table 2: Ablation results of Cotraining on Spatio-Temporal Tasks." + }, + { + "type": "table", + "bbox": [ + 0.177, + 0.501, + 0.825, + 0.646 + ], + "angle": 0, + "content": "
MethodEpochsTraining PromptTest PromptCharades-STA (in domain)ActivityNet (out domain)VideoMME
ThinkAnswerThinkAnswermIoUR@0.3R@0.5R@0.7mIoUR@0.3R@0.5R@0.7Short-Avg
Vision Experts
FlashVTG [2]-------70.349.9-----
InternVideo2-6B [28]-------70.049.0-----
SG-DETR [8]--71.152.8-----
MLLMs
Qwen2.5-VL-7B (baseline)---29.044.724.211.121.128.315.87.471.3
---28.141.823.411.117.722.713.47.771.3
+SFT146.363.945.025.320.630.216.77.9N/A*(-71.3)
334.6(+6.5)51.736.320.617.3(-3.8)26.110.03.9N/A*(-71.3)
+GRPO158.780.967.745.431.946.328.814.172.6
159.3(+31.2)81.770.446.030.7(+13.0)45.027.512.973.6(+2.3)
361.3(+33.2)83.172.851.534.3(+16.6)50.432.216.270.9(-0.4)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.648, + 0.825, + 0.677 + ], + "angle": 0, + "content": "Table 3: Ablation results of Temporal Grounding Task. * indicates that the model has suffered from overfitting and is unable to answer the question properly." + }, + { + "type": "table", + "bbox": [ + 0.303, + 0.696, + 0.697, + 0.768 + ], + "angle": 0, + "content": "
MethodGoTVideoMME
Average overlapR@0.5R@0.7Short-Avg
Qwen2.5-VL-7B12.61.1071.3
+SFT41.829.53.959.2
+GRPO42.5(+29.9)30.6(+29.5)3.9(+3.9)71.4(+0.1)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.769, + 0.825, + 0.785 + ], + "angle": 0, + "content": "Table 4: Ablation results of Object Tracking. We use 8 frames as input for training and evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Temporal Grounding and Object tracking. As shown in Table 3 and Table 4, fine-tuning Qwen2.5-VL using GRPO significantly improves the performance of temporal grounding and object tracking tasks. Additionally, it slightly enhances the performance on the general understanding benchmark VideoMME. Even when training for more epochs, GRPO is less prone to overfitting compared to SFT. Instead, it can continuously improve the performance of temporal grounding, eventually surpassing the performance of previous expert models. Moreover, stimulating the model's thinking ability provides some benefits for both temporal grounding and VideoMME tasks." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.244, + 0.089, + 0.761, + 0.278 + ], + "angle": 0, + "content": "
MethodTraining PromptTest PromptNExTGQAVideoMME
ThinkAnswerGlueThinkAnswerGluemIoUaccShort-Avg
Direct Output
Qwen2.5-VL-7B (baseline)-41.771.3
15.459.5-
+SFT-65.160.2
28.2(+12.8)64.8(+5.3)60.1(-11.2)
+GRPO-70.171.7
16.270.271.7
35.1(+19.7)68.7(+9.2)72.0(+0.7)
Chain-of-thought Output
Qwen2.5-VL-7B-47.773.0
20.253.372.2
+GRPO-68.874.7
32.9(+12.7)66.9(+13.6)75.3(+3.1)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.316, + 0.28, + 0.68, + 0.294 + ], + "angle": 0, + "content": "Table 5: Ablation results of Multi-Choice Video QA." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.314, + 0.828, + 0.453 + ], + "angle": 0, + "content": "Video Question Answer. As shown in Table 5, for the video question answering task, we selected the multi-choice QA task, which is easy to evaluate, for our experiments. Additionally, we explored the grounding QA task. In this task, when answering questions, the model is required to simultaneously provide the temporal cues on which its answers are based. Using merely a little over three thousand training data samples, we found that GRPO demonstrated remarkable fine-tuning capabilities. Not only did it lead to a substantial improvement in the performance of the NExTGQA task, but it also brought about a noticeable enhancement in the VideoMME task. We noticed that, unlike the previous strongly spatiotemporal perception tasks such as temporal grounding, thinking played a significant role in the QA task. Meanwhile, the glue signals also provided some assistance for relatively complex video understanding tasks like VideoMME." + }, + { + "type": "table", + "bbox": [ + 0.31, + 0.466, + 0.697, + 0.559 + ], + "angle": 0, + "content": "
MethodDream-1kVidTAB-QA
F1PrecisionRecallAccuracy
Baseline30.633.827.970.7
+SFT31.432.630.271.7
+GRPO38.2(+7.6)45.4(+11.6)33.1(+5.2)72.6(+1.9)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.289, + 0.562, + 0.709, + 0.576 + ], + "angle": 0, + "content": "Table 6: Results of Video Caption and Video Quality Access." + }, + { + "type": "table", + "bbox": [ + 0.309, + 0.601, + 0.697, + 0.688 + ], + "angle": 0, + "content": "
MethodLLM JudgeDream-1k
F1PrecisionRecall
Baseline-30.633.827.9
+ GRPOGPT-3.5-turbo-0125 Qwen2.5-72B37.9(+7.3)44.4(+10.6)33(+5.1)
38.2(+7.6)45.4(+11.6)33.1(+5.2)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.355, + 0.69, + 0.642, + 0.705 + ], + "angle": 0, + "content": "Table 7: Ablation of Video Caption Task." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.828, + 0.787 + ], + "angle": 0, + "content": "Video Caption and Video Quality Assessment. For the Video Caption and Video Quality Assessment tasks, we found that GRPO still demonstrated its advantages over SFT, As shown in Table 6. The significant metric improvements on these two benchmarks demonstrate the effectiveness of our approach." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Ablation of Reward Evaluators To assess the impact of different large language models (LLMs) as reward evaluators, we conducted parallel experiments using GPT-3.5-turbo-0125 and Qwen2.5-72B as distinct judges (Table 7). Models trained under both evaluators achieved nearly identical performance, demonstrating consistent caption reward generation across LLMs. We attribute this consistency to GRPO's fundamental mechanism: it relies on relative differential scoring within response groups rather than absolute reward values. This confirms that \\( R_{recall} \\) produces discriminative reward signals for predicted captions independent of the choice of LLM judge, validating both the efficacy of our reward design and the stability of its signaling mechanism. Crucially, when guided by these reliable" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "reward signals, our approach delivers substantial performance gains in description tasks using only limited high-quality data, demonstrating remarkable data-efficiency and significant optimization potential." 
+ }, + { + "type": "table", + "bbox": [ + 0.256, + 0.149, + 0.744, + 0.254 + ], + "angle": 0, + "content": "
Model\nAvg. DurationClue\nPerceptionVideoMME\n1010sLongVideoBench\n473s
Qwen2.5-VL-7B64.456.0
63.3(-1.1)55.2(-0.8)
VideoChat-R1-thinking62.151.9
63.6(+1.5)58.2(+6.3)
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.255, + 0.825, + 0.298 + ], + "angle": 0, + "content": "Table 8: Ablation results of \"Clue-Perception\". It should be noted that due to our adoption of a lower number of input pixels, the absolute performance is not entirely consistent with that reported for Qwen2.5-VL." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.326, + 0.827, + 0.466 + ], + "angle": 0, + "content": "Ablation of \"Clue-Perception\" As shown in Table. 8, we compared the performance changes of the model with and without perception enhancement when applying the \"Clue-Perception\" strategy on two representative long video benchmarks [6, 30]. It is noteworthy that without the use of \"Clue-Perception\", VideoChat-R1 showed no significant performance improvement over Qwen2.5-VL-7B in long video tasks, which can be attributed to the fact that our training dataset consists entirely of short videos under 1 minute. However, after the application of the \"Clue-Perception\" operation, VideoChat-R1 demonstrated a significant performance enhancement, indicating that the clues it provides are more accurate and thus revealing the potential of clue-perception in long video understanding. In contrast, due to its insufficient spatiotemporal perception capability, Qwen2.5-VL-7B even exhibited a performance decline after the implementation of the \"Clue-Perception\" operation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.481, + 0.825, + 0.537 + ], + "angle": 0, + "content": "GRPO vs. SFT. It can be observed that across various types of tasks, GRPO outperforms SFT. Whether it is in terms of the performance on in-domain tasks, out-domain tasks, or the preservation of the original general performance, our experimental results demonstrate that GRPO is a promising fine-tuning approach. We will leave the large-scale comparison for future research." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.552, + 0.825, + 0.623 + ], + "angle": 0, + "content": "Chain-of-thought vs. Direct Output. Based on the video tasks and experiments we have explored, which focus on spatiotemporal perception, the output of the chain of thought has not demonstrated obvious advantages. In some cases, it is even inferior to the direct output. We believe that how to define appropriate video reasoning tasks and evaluation methods remains to be explored. The existing training data is insufficient to activate the model to output truly effective video reasoning chains." + }, + { + "type": "title", + "bbox": [ + 0.223, + 0.683, + 0.263, + 0.694 + ], + "angle": 0, + "content": "Video:" + }, + { + "type": "image", + "bbox": [ + 0.269, + 0.647, + 0.326, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.332, + 0.647, + 0.388, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.395, + 0.647, + 0.452, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.456, + 0.647, + 0.514, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.52, + 0.647, + 0.576, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.581, + 0.647, + 0.638, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.643, + 0.647, + 0.7, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.705, + 0.647, + 0.762, + 0.729 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.223, + 0.73, + 0.442, + 0.743 + ], + "angle": 0, + "content": "Query: A person puts on a pair of shoes." 
+ }, + { + "type": "text", + "bbox": [ + 0.223, + 0.75, + 0.312, + 0.762 + ], + "angle": 0, + "content": "Ground Truth:" + }, + { + "type": "text", + "bbox": [ + 0.334, + 0.75, + 0.521, + 0.762 + ], + "angle": 0, + "content": "1.8s< 9.4s" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.768, + 0.254, + 0.779 + ], + "angle": 0, + "content": "SFT:" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.768, + 0.406, + 0.779 + ], + "angle": 0, + "content": "1.3s←---->4.1s" + }, + { + "type": "text", + "bbox": [ + 0.223, + 0.79, + 0.311, + 0.802 + ], + "angle": 0, + "content": "VideoChat-R1:" + }, + { + "type": "text", + "bbox": [ + 0.328, + 0.79, + 0.551, + 0.802 + ], + "angle": 0, + "content": "2.0s 10.7s" + }, + { + "type": "text", + "bbox": [ + 0.221, + 0.804, + 0.752, + 0.853 + ], + "angle": 0, + "content": "\\nThe man is seen standing in front of a pantry, looking down at something on the floor. He then bends down and picks up a pair of shoes from the floor. He then proceeds to put on the shoes. The event \\(\"a person puts on a pair of shoes\"\\) occurs during this time. \\(\\backslash n < /\\) think>\\n< timestep>\\nThe event happens in 2.00 - 10.7 seconds.\\n\\n< answer>\\n2.00 to 10.67\\n" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.899 + ], + "angle": 0, + "content": "Figure 2: Examples on temporal grounding task. VideoChat-R1 gives a more accurate time interval after thinking." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.225, + 0.117, + 0.266, + 0.128 + ], + "angle": 0, + "content": "Video:" + }, + { + "type": "image", + "bbox": [ + 0.268, + 0.101, + 0.342, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.351, + 0.101, + 0.426, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.438, + 0.101, + 0.511, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.522, + 0.101, + 0.597, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.608, + 0.101, + 0.682, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.692, + 0.101, + 0.767, + 0.143 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.145, + 0.63, + 0.158 + ], + "angle": 0, + "content": "Question: Why did the girl suddenly jump up from where she was standing?" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.165, + 0.378, + 0.176 + ], + "angle": 0, + "content": "Ground Truth: Excited." + }, + { + "type": "text", + "bbox": [ + 0.441, + 0.164, + 0.512, + 0.176 + ], + "angle": 0, + "content": "[20.9s, 25.0s]" + }, + { + "type": "text", + "bbox": [ + 0.62, + 0.163, + 0.674, + 0.175 + ], + "angle": 0, + "content": "E--->" + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.184, + 0.371, + 0.195 + ], + "angle": 0, + "content": "SFT: Dance." 
+ }, + { + "type": "text", + "bbox": [ + 0.441, + 0.184, + 0.512, + 0.195 + ], + "angle": 0, + "content": "[19.4s, 37.2s]" + }, + { + "type": "text", + "bbox": [ + 0.617, + 0.184, + 0.67, + 0.195 + ], + "angle": 0, + "content": "k--" + }, + { + "type": "text", + "bbox": [ + 0.694, + 0.184, + 0.726, + 0.195 + ], + "angle": 0, + "content": "---→" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.207, + 0.378, + 0.218 + ], + "angle": 0, + "content": "VideoChat-R1: Excited." + }, + { + "type": "text", + "bbox": [ + 0.441, + 0.207, + 0.512, + 0.217 + ], + "angle": 0, + "content": "[20.1s, 28.0s]" + }, + { + "type": "text", + "bbox": [ + 0.619, + 0.206, + 0.689, + 0.217 + ], + "angle": 0, + "content": "<- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.219, + 0.76, + 0.255 + ], + "angle": 0, + "content": " The girl jumps up from her position after playing with the rocket toy and then walks around the room holding it. This suggests that she is excited about the toy and wants to show it off. The jump could be interpreted as an expression of excitement. " + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.256, + 0.495, + 0.268 + ], + "angle": 0, + "content": "E[(20.1,28.0)]" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.286, + 0.825, + 0.316 + ], + "angle": 0, + "content": "Figure 3: Examples on Video QA task. It can be seen that VideoChat-R1 can not only answer questions correctly but also provide relatively accurate reference time periods (clue)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.341, + 0.347, + 0.357 + ], + "angle": 0, + "content": "4.3 Qualitative Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.367, + 0.825, + 0.465 + ], + "angle": 0, + "content": "As shown in Figure 2 and 3, we provide visualizations of VideoChat-R1's outputs for the temporal grounding and video QA tasks. 
We observe that due to the enhanced spatiotemporal perception capability, VideoChat-R1 can more accurately locate video segments relevant to the question and conduct reasonable reasoning to answer the question. However, compared with the complex chains of thought involved in solving mathematical problems or coding tasks in text and image domains, the chains of thought generated in our current video tasks are relatively simple. We will explore more complex video reasoning tasks in future research." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.484, + 0.308, + 0.499 + ], + "angle": 0, + "content": "5 Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.515, + 0.827, + 0.613 + ], + "angle": 0, + "content": "In this work, we systematically investigate the role of reinforcement fine-tuning (RFT) with Group Relative Policy Optimization (GRPO) in enhancing video-centric multimodal large language models (MLLMs). Our experiments demonstrate that RFT is a highly data-efficient paradigm for task-specific improvements, enabling VideoChat-R1—a model trained with limited samples via multi-task RFT—to achieve state-of-the-art performance on spatio-temporal perception tasks while preserving general chat capabilities and exhibiting emergent spatiotemporal reasoning. We believe our work can present relevant insights for future research efforts in reinforcement learning of video MLLMs." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.113, + 0.826, + 0.141 + ], + "angle": 0, + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.149, + 0.826, + 0.175 + ], + "angle": 0, + "content": "[2] Zhuo Cao, Bingqing Zhang, Heming Du, Xin Yu, Xue Li, and Sen Wang. Flashvtg: Feature layering and adaptive score handling network for video temporal grounding. arXiv preprint arXiv:2412.13441, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.185, + 0.827, + 0.223 + ], + "angle": 0, + "content": "[3] Huilin Deng, Ding Zou, Rui Ma, Hongchen Luo, Yang Cao, and Yu Kang. Boosting the generalization and reasoning of vision language models with curriculum reinforcement learning. arXiv preprint arXiv:2503.07065, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.233, + 0.825, + 0.272 + ], + "angle": 0, + "content": "[4] Yihe Deng, Hritik Bansal, Fan Yin, Nanyun Peng, Wei Wang, and Kai-Wei Chang. Openthinker: An early exploration to complex vision-language reasoning via iterative self-improvement. arXiv preprint arXiv:2503.17352, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.282, + 0.825, + 0.31 + ], + "angle": 0, + "content": "[5] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.318, + 0.826, + 0.357 + ], + "angle": 0, + "content": "[6] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.367, + 0.825, + 0.394 + ], + "angle": 0, + "content": "[7] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. 
In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.402, + 0.825, + 0.429 + ], + "angle": 0, + "content": "[8] Aleksandr Gordeev, Vladimir Dokholyan, Irina Tolstykh, and Maksim Kuprashevich. Saliency-guided detr for moment retrieval and highlight detection. arXiv preprint arXiv:2410.01615, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.438, + 0.825, + 0.477 + ], + "angle": 0, + "content": "[9] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.486, + 0.825, + 0.525 + ], + "angle": 0, + "content": "[10] Lianghua Huang, Xin Zhao, and Kaiqi Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. IEEE transactions on pattern analysis and machine intelligence, 43(5): 1562–1577, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.535, + 0.825, + 0.574 + ], + "angle": 0, + "content": "[11] Zhenpeng Huang, Xinhao Li, Jiaqi Li, Jing Wang, Xiangyu Zeng, Cheng Liang, Tao Wu, Xi Chen, Liang Li, and Limin Wang. Online video understanding: A comprehensive benchmark and memory-augmented method. arXiv preprint arXiv:2501.00584, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.583, + 0.825, + 0.622 + ], + "angle": 0, + "content": "[12] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.632, + 0.825, + 0.659 + ], + "angle": 0, + "content": "[13] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. 
Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.668, + 0.825, + 0.695 + ], + "angle": 0, + "content": "[14] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.704, + 0.827, + 0.743 + ], + "angle": 0, + "content": "[15] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mybench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.753, + 0.825, + 0.79 + ], + "angle": 0, + "content": "[16] Xinhao Li, Zhenpeng Huang, Jing Wang, Kunchang Li, and Limin Wang. Videoeval: Comprehensive benchmark suite for low-cost evaluation of video foundation model. arXiv preprint arXiv:2407.06491, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.801, + 0.825, + 0.84 + ], + "angle": 0, + "content": "[17] Xinhao Li, Yi Wang, Jiashuo Yu, Xiangyu Zeng, Yuhan Zhu, Haian Huang, Jianfei Gao, Kunchang Li, Yinan He, Chenting Wang, et al. Videochat-flash: Hierarchical compression for long-context video modeling. arXiv preprint arXiv:2501.00574, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.849, + 0.825, + 0.876 + ], + "angle": 0, + "content": "[18] Yuqi Liu, Bohao Peng, Zhisheng Zhong, Zihao Yue, Fanbin Lu, Bei Yu, and Jiaya Jia. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. arXiv preprint arXiv:2503.06520, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.885, + 0.825, + 0.912 + ], + "angle": 0, + "content": "[19] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.113, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.131 + ], + "angle": 0, + "content": "[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.139, + 0.827, + 0.19 + ], + "angle": 0, + "content": "[21] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36: 42748-42761, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.199, + 0.825, + 0.239 + ], + "angle": 0, + "content": "[22] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based r1. arXiv preprint arXiv:2503.07536, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.246, + 0.825, + 0.286 + ], + "angle": 0, + "content": "[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. 
arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.293, + 0.826, + 0.334 + ], + "angle": 0, + "content": "[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.341, + 0.824, + 0.369 + ], + "angle": 0, + "content": "[25] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.377, + 0.825, + 0.416 + ], + "angle": 0, + "content": "[26] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.424, + 0.825, + 0.463 + ], + "angle": 0, + "content": "[27] Ye Wang, Boshen Xu, Zihao Yue, Zihan Xiao, Ziheng Wang, Liang Zhang, Dingyi Yang, Wenxuan Wang, and Qin Jin. Timezero: Temporal video grounding with reasoning-guided lvm. arXiv preprint arXiv:2503.13377, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.471, + 0.826, + 0.512 + ], + "angle": 0, + "content": "[28] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, et al. Internvideo2: Scaling foundation models for multimodal video understanding. In European Conference on Computer Vision, pages 396-416. Springer, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.519, + 0.826, + 0.559 + ], + "angle": 0, + "content": "[29] Yi Wang, Xinhao Li, Ziang Yan, Yinan He, Jiashuo Yu, Xiangyu Zeng, Chenting Wang, Changlian Ma, Haian Huang, Jianfei Gao, et al. Internvideo2.5: Empowering video mllms with long and rich context modeling. arXiv preprint arXiv:2501.12386, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.566, + 0.826, + 0.606 + ], + "angle": 0, + "content": "[30] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37: 28828-28857, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.613, + 0.825, + 0.654 + ], + "angle": 0, + "content": "[31] Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13204-13214, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.661, + 0.825, + 0.688 + ], + "angle": 0, + "content": "[32] Yifan Xu, Xinhao Li, Yichun Yang, Rui Huang, and Limin Wang. Fine-grained video-text retrieval: A new benchmark and method. arXiv preprint arXiv:2501.00513, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.695, + 0.826, + 0.736 + ], + "angle": 0, + "content": "[33] Ziang Yan, Zhilin Li, Yinan He, Chenting Wang, Kunchang Li, Xinhao Li, Xiangyu Zeng, Zilei Wang, Yali Wang, Yu Qiao, et al. Task preference optimization: Improving multimodal large language models with vision task alignment. arXiv preprint arXiv:2412.19326, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.743, + 0.824, + 0.771 + ], + "angle": 0, + "content": "[34] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. 
arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.778, + 0.826, + 0.817 + ], + "angle": 0, + "content": "[35] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, et al. R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.825, + 0.826, + 0.865 + ], + "angle": 0, + "content": "[36] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. In European Conference on Computer Vision, pages 425-443. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.873, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[37] Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, et al. Timesuite: Improving mllms for long video understanding via grounded tuning. arXiv preprint arXiv:2410.19702, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[38] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.14, + 0.826, + 0.18 + ], + "angle": 0, + "content": "[39] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. 
Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.187, + 0.826, + 0.227 + ], + "angle": 0, + "content": "[40] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. R1-vl: Learning to reason with multimodal large language models via step-wise group relative policy optimization. arXiv preprint arXiv:2503.12937, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.235, + 0.826, + 0.263 + ], + "angle": 0, + "content": "[41] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.27, + 0.826, + 0.298 + ], + "angle": 0, + "content": "[42] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.305, + 0.826, + 0.334 + ], + "angle": 0, + "content": "[43] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.334 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_origin.pdf b/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..eef70724f7450ddad66e097abf58b23a6feb7b05 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/6f298a94-d9f1-4eed-a03e-720f559a2c57_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3f1c06379971a3789ca2a7c66ed0af6777a69c468d8a49e95e13ec9d351cb08 +size 2892127 diff --git a/data/2025/2504_06xxx/2504.06958/full.md b/data/2025/2504_06xxx/2504.06958/full.md new file mode 100644 index 0000000000000000000000000000000000000000..ab2122a18dde1704e2713f0882ddab616880c55c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/full.md @@ -0,0 +1,347 @@ +# VideoChat-R1: Enhancing Spatio-Temporal Perception via Reinforcement Fine-Tuning + +Xinhao Li $^{2,1*}$ , Ziang Yan $^{3,1*}$ , Desen Meng $^{2*}$ , Lu Dong $^{4,1}$ , Xiangyu Zeng $^{2,1}$ , Yinan He $^{1}$ , Yali Wang $^{6,1}$ , Yu Qiao $^{1,6}$ , Yi Wang $^{\dagger 1,5}$ , Limin Wang $^{\dagger 2,1}$ + +$^{1}$ Shanghai AI Laboratory $^{2}$ Nanjing University $^{3}$ Zhejiang University $^{4}$ University of Science and Technology of China $^{5}$ Shanghai Innovation Institute + +Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences https://github.com/OpenGVLab/VideoChat-R1 + +# Abstract + +Reinforcement Learning (RL) benefits Large Language Models (LLMs) for complex reasoning. 
Inspired by this, we explore integrating spatio-temporal specific rewards into Multimodal Large Language Models (MLLMs) to address the unique challenges of video understanding, such as long-range temporal associations. This paper investigates how rule-based rewards, particularly temporal ones, can improve video reasoning and their generalizability. Our study proposes Reinforcement Fine-Tuning (RFT) as a data-efficient method to enhance video reasoning on specific tasks without sacrificing original capabilities. Through joint RFT on multiple spatio-temporal perception tasks, we developed VideoChat-R1, a powerful Video MLLM. VideoChat-R1 achieves state-of-the-art spatio-temporal perception, demonstrating significant improvements in tasks like temporal grounding (+31.8) and object tracking (+31.2), while also improving general QA benchmarks. The enhanced perception and preserved chat abilities contribute to a more reliable video dialogue system, leading to our "Temporal Clue-driven Reasoning" inference schema. This work provides a foundation for developing robust, real-world video comprehension agents. + +# 1 Introduction + +The integration of reinforcement learning (RL) has notably propelled the capabilities of large language models (LLMs) forward, particularly in complex reasoning. Studies, such as o-series from OpenAI [12] and R1 of DeepSeek [9], demonstrated that test-time scaling improves model reasoning and such reasoning is elicit-able via only rule-based rewarding. Inspired by these advances, community is interested in tuning multimodal large language models (MLLMs) with vision-related verifications in Group Relative Policy Optimization (GRPO) [9] manner [43, 18, 38, 3, 22, 19, 35, 40, 39, 4]. They primarily consider visual mathematical reasoning and spatial localization. + +Regarding video understanding, reasoning is also crucial as some of its core abilities (e.g. 
its long-range temporal association, fine-grained spatiotemporal understanding based on user queries) are barely addressed via perception or simple analysis. Initial works [42, 27, 5] have validated the superiority of the GRPO algorithm over supervised fine-tuning in some specific video tasks, such as temporal grounding and video question answer. Considering advancing the agent development with real-world video comprehension, we need to investigate and integrate spatio-temporal specific rewards or verification mechanisms directly into the learning of current MLLMs in a scalable manner. This could foster a more tightly coupled, near-closed-loop learning environment, enabling MLLMs to master intricate spatio-temporal skills. While our current work may not fully achieve this ambitious + +![](images/6882c68a3a26b0712c0adff2c4095059248463c416caf7d765c85c49ea62ab41.jpg) +Figure 1: Overview of VideoChat-R1. Through reinforcement learning fine-tuning using GRPO, VideoChat-R1 has powerful spatio-temporal perception capabilities and can apply these capabilities in chatting scenarios. + +goal, it endeavors to lay foundational groundwork for the community by exploring how to embed such mechanisms effectively. Specifically, we in this paper chart the landscape of how rule-based rewards (especially temporal related ones) work in video understanding, along with systematic evaluations of the its generalizability across diverse video-based reasoning scenarios. + +Our study begins with spatio-temporal perception, extending to other popular video tasks (e.g. video caption), forming a practical recipe to enhance video reasoning via proper Reinforcement Fine-Tuning (RFT). We evaluate the robustness of various video verifications through different training and testing setting, also benchmarking their respective data requirements. Moreover, how these rewards interact with each other are also systematically explored. 
Based on these experiences, we give an intuitive and effective inference schema named "Temporal Clue-driven Reasoning". Generally, our main findings are as follows. + +- Reinforcement fine-tuning is data-efficient for enhancing models on specific tasks without sacrificing original capabilities. With a small amount of data, training via RFT can yield a remarkable improvement in spatio-temporal perception ability, and there is negligible impact on the performance of out-domain tasks and the original general capabilities of the model, which outperforms traditional supervised fine-tuning significantly. +- Through joint reinforcement fine-tuning on multiple spatio-temporal perception tasks, we construct VideoChat-R1, a powerful Video MLLM that boasts state-of-the-art spatiotemporal perception capabilities while also taking into account chat abilities. We have also discovered that training on spatio-temporal perception tasks has slightly strengthened the model's spatio-temporal reasoning abilities. Compared with Qwen2.5-VL-7B, VideoChat-R1 achieves several times the performance improvement in spatiotemporal perception tasks such as temporal grounding (+31.8) and object track (+31.2). At the same time, it also achieves non-trivial improvements on general QA benchmarks, such as VideoMME (+0.9), MVBench (+1.0), and Perception Test (+0.9) +- The improvement of spatio-temporal perception ability and the preservation of the original chat capability can contribute to a more reliable and efficient video dialogue system. Our VideoChat-R1 can provide reference video segments when answering users' questions. Meanwhile, we propose to utilize these video segments for "Temporal Clue-driven Reasoning" to further obtain more accurate answers. 
Our experimental results reveal the potential of the approach that enhances the model's spatiotemporal perception ability through reinforcement learning for future research in the directions of reliable video dialogue systems and long video understanding. + +# 2 Related work + +Reasoning in MLLMs. The field LLMs has witnessed significant breakthroughs in reasoning, exemplified by recent works [12, 9]. These developments [23, 9, 24] have markedly improved LLMs' proficiency in tackling complex, multi-step tasks, such as challenging mathematical problems and coding exercises. Extending these RL techniques to multimodal large language models (MLLMs) has seen numerous efforts [43, 18, 38, 3, 22, 19, 35, 40, 39, 4] focused on leveraging verifiable reward mechanisms to boost visual reasoning performance. However, the application of RL-based strategies to spatio-temporal reasoning within the video domain remains comparatively less explored, with a few studies [27, 42, 5] investigating this area. [27] and [42] show GRPO's potential in temporal grounding and sentiment analysis, while, [5] demonstrates how GRPO can facilitate temporal reasoning. + +Spatio-Temporal Understanding with MLLMs. Video understanding heavily relies on spatiotemporal perception. Progress in Video Multimodal Large Language Models (video MLLMs) [14, 20, 15, 28, 41, 29, 17, 11, 1] addresses video question answering and captioning in the unfed dialogue format, leveraging LLMs to organize predictions over visual perceptions. [36] and [37] leverage data augmentation for temporal modeling, yet compromising their general performance. [33] improves fine-grained spatio-temporal perception by task preference optimization with solid supervised fine-tuning using a lot of data. + +# 3 Methodology + +We present how to exploit the spatio-temporal rewards to improve MLLMs in video domains using GRPO. Before that, we revisit the Group Relative Policy Optimization (GRPO) [23] first. 
+ +# 3.1 Preliminary + +Group Relative Policy Optimization (GRPO) [23] compares groups of candidates responses directly, independent on critic models. Regarding this, it significantly lowers training resources. Formally, with an input query $q$ , GRPO initiates by generating a set of $G$ distinct candidate responses, denoted as $o = \{o_1, \dots, o_G\}$ , via policy sampling. Each of these candidates is then assigned a score from a predefined reward function, yielding $R_1, \dots, R_G$ . To normalize these scores and ascertain the relative quality of each response, GRPO computes their mean and standard deviation: + +$$ +A _ {i} = \frac {R _ {i} - \operatorname {m e a n} \left(\left\{R _ {i} \right\} _ {i = 1} ^ {G}\right)}{\operatorname {s t d} \left(\left\{R _ {i} \right\} _ {i = 1} ^ {G}\right)}. \tag {1} +$$ + +Here $A_{i}$ quantifies the relative quality of the $i$ -th candidate response within its group. The objective of GRPO is to encourage the model to prioritize and generate responses that achieve higher relative scores within such groups. Furthermore, the overall training objective incorporates a KL-divergence term $\mathrm{D}_{\mathrm{KL}}(\cdot |\cdot)$ , which serves to constrain the optimized policy $\pi_{\theta}$ from diverging excessively from the original MLLM parameters $\pi_{\mathrm{ref}}$ . + +# 3.2 Spatio-Temporal Rewards of Video MLLM in GRPO + +We explore how to use GRPO to enhance the performance of Video MLLM in video-language understanding. We consider the five most common types of video related tasks: temporal grounding, object tracking, video question answering, captioning, and quality assessment in our experiments. + +Format reward. To enable the model to output responses in the format we desire. For example, we expect the model to enclose its thought process with ... and the answer with ..., we designed a format reward $R_{\mathrm{format}}$ for each task. 
We use regular expression matching to determine whether the model adheres to our specified format: + +$$ +R _ {\text {f o r m a t}} = \left\{ \begin{array}{l l} 1, & \text {i f o u t p u t m a t c h e s f o r m a t}, \\ 0, & \text {i f o u t p u t d o e s n ’ t m a t c h f o r m a t}. \end{array} \right. \tag {2} +$$ + +IoU reward in spatio-temporal perception. For the spatio-temporal perception such as temporal grounding and object tracking, it requires the Video MLLM to output the time interval in the video that is associated with the content of a given textual query. Evidently, we can use the Intersection over Union (IoU) between the predicted interval by the model and the ground-truth interval as the reward function. This reward function effectively characterizes the accuracy of the interval predicted by the model. + +$$ +R _ {\mathrm {I o U}} = \frac {\left| \mathcal {I} _ {\text {p r e d}} \cap \mathcal {I} _ {\mathrm {g t}} \right|}{\left| \mathcal {I} _ {\text {p r e d}} \cup \mathcal {I} _ {\mathrm {g t}} \right|}, \tag {3} +$$ + +where $\mathcal{I}_{\mathrm{pred}}$ and $\mathcal{I}_{\mathrm{gt}}$ are the predicted and the ground truth of time intervals or detection boxes, respectively. + +Accuracy reward in classification. Discriminative tasks, such as multiple-choice video question answering and classification, aim to determine whether the model's prediction is consistent with the answer to the question. Therefore, we define: + +$$ +R _ {\text {a c c u a c y}} = \left\{ \begin{array}{l l} 0, & \text {i f} A _ {\text {p r e d}} \neq A _ {\text {g t}} \\ 1, & \text {i f} A _ {\text {p r e d}} = A _ {\text {g t}}, \end{array} \right. \tag {4} +$$ + +where $A_{\mathrm{pred}}$ and $A_{\mathrm{gt}}$ denote the predicted and the ground truth answers, respectively. + +Recall reward in video captioning. 
For tasks like video captioning with open-ended outputs, it is impossible to simply compare and determine the gap between the generated caption and the ground truth caption. Therefore, we use a LLM as a "judge" to provide a reward score. In order to reduce the uncertainty of the evaluation criteria for the LLM, we first make the LLM decompose the ground truth and predicted captions into events list. Specifically, we utilize Qwen2.5-72B [34] to extract the events in the description and judge whether the events in a ground truth description can be entailed by the description predicted by the model. We calculate the event recall score as the ratio of events in a ground truth description that are entailed by the predicted description, and set different rewards according to the event recall score: + +$$ +R _ {\text {r e c a l l}} = \operatorname {R e c a l l} _ {\text {e v e n t}} \left(C _ {\text {p r e d}}, C _ {\text {g t}}\right), \tag {5} +$$ + +where $C_{\mathrm{pred}}$ and $C_{\mathrm{gt}}$ represent the predicted and the ground truth captions, respectively. + +By combining the above reward functions, we explored how to utilize GRPO to enhance the performance of Video MLLM in various tasks. The specific details can be found in the Section 4. + +# 3.3 Enhance Spatio-Temporal Perception of Video MLLM through GRPO + +The combination of reward functions. We adopt different combinations of reward functions for training in different tasks. Specifically, for the temporal grounding and object tracking task, $R_{\mathrm{st}} = R_{\mathrm{format}} + R_{\mathrm{IoU}}$ . For the multi-choice QA and video quality assessment, $R_{\mathrm{qa}} = R_{\mathrm{format}} + R_{\mathrm{accuracy}}$ . For the multi-choice QA with glue (e.g. Grounding QA), $R_{\mathrm{gqa}} = R_{\mathrm{format}} + R_{\mathrm{IoU}} + R_{\mathrm{Acc}}$ . For the video caption, $R_{\mathrm{cap}} = R_{\mathrm{format}} + R_{\mathrm{Caption}}$ . + +VideoChat-R1-18k. 
We collect diverse video corpus from existing public video datasets for reinforcement learning training to enhance the model's spatiotemporal perception ability. For the temporal grounding task, we use the training set of Charade - STA [7] (5,338 samples) for training. For the object tracking task, training is conducted on the GoT - 10k [10] dataset, which has 9,335 samples. For the QA and grounding QA tasks, the validation set of NExTGQA [31] (3,358 samples) is used for training. For video captioning, FIBER-1k [32] (1,000 samples) is adopted for training. For video quality assessment, we use the quality assessment task from VidTAB [16] under the 100-shot setting, with 200 samples for training. Finally, for the training of VideoChat-R1, we perform joint training on three spatio-temporal perception-related tasks: temporal grounding, object tracking, and grounding QA. In total, 18,031 samples are used for training. + +Algorithm 1: VideoChat $\mathcal{M}$ with "Clue-Perception" +```txt +Input: $V_{\mathrm{low}}$ : Low-resolution/low-fps vi +``` + +```txt +$Q$ :User question +``` + +```txt +$\Delta_{\mathrm{res}}$ : Resolution boost factor +``` + +```latex +$\Delta_{\mathrm{fps}}$ : Frame rate boost factor +``` + +```txt +Output: $A^{(2)}$ : Final refined answer +``` + +```txt +Initial Answer Generation: +``` + +```latex +$(A^{(1)},\mathcal{C})\gets \mathcal{M}(V_{\mathrm{low}},Q)$ +``` + +```txt +Clue Processing & Upsampling: +``` + +```txt +$\mathcal{T}_c \gets$ ExtractTemporalClues $(\mathcal{C})$ +``` + +```txt +$V_{\mathrm{seg}} \gets \mathrm{SelectSegments}(V_{\mathrm{low}}, \mathcal{T}_c)$ +``` + +```latex +$V_{\mathrm{high}} \gets$ Upsample $(V_{\mathrm{seg}}, \Delta_{\mathrm{res}}, \Delta_{\mathrm{fps}})$ +``` + +```txt +Final Answer Generation: +``` + +```latex +$A^{(2)}\gets \mathcal{M}(V_{\mathrm{high}},Q)$ +``` + +```lua +return $A^{(2)}$ +``` + +Chat with Enhanced Spatio-Temporal Perception. 
After enhancing the spatiotemporal perception capability of MLLMs, we can construct a more reliable video chat system. Specifically, after the model answers a user's question, it can provide relatively accurate clues that support the answer to that question. We can further leverage these clues to improve the accuracy of the responses. Herein, we propose a simple "Temporal Clue-driven Reasoning" schema: after the model gives the first answer, we re-input the video segments corresponding to the obtained clues into the model at a higher resolution and frame rate, prompting it to answer again. By perceiving more details, the model can generate more accurate responses. Additionally, this operation is also well-suited for long video understanding under conditions of limited computing resources. + +# 4 Experiments + +Implementation details. The main experiments are all conducted based on Qwen2.5-VL-7B [1] (except for the video captioning, for which Qwen2-VL-7B [26] is used). + +**Benchmarks.** We employ MVBench [15], Perception Test [21], VideoMME [6] for evaluation of general video understanding. Given that the majority of videos in our training set are short-length, we only use the short subset of VideoMME in testing. For the temporal grounding task, we use the test set of Charade-STA [7] for in-domain testing and the test set of ActivityNet-Grounding [13] as out-domain test data. For the object tracking task, testing is done using the GoT-10k [10] dataset. For the QA and grounding QA tasks, the test set of NExTGQA [31] is used for testing. And we use Dream-1k [25] and VidTAB-QA [16] for the video captioning and video quality access. + +# 4.1 Evaluation of VideoChat-R1 + +
MethodCharades-STAActivityNetNExTGQAGoTVideoMMEMVBenchPerception Test
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-AvgAvgVal
Baseline
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.366.969.1
SFT on specific tasks
+SFT w/ Charades-STA46.345.025.320.616.77.9----N/A*N/A*N/A*
+SFT w/ GoT--------41.829.559.258.658.5
+SFT w/ NExTGQA------28.264.8--60.159.260.7
GRPO on various tasks
VideoChat-R160.871.750.236.633.417.732.470.643.838.272.267.970.0
VideoChat-R1-thinking59.970.647.235.533.316.736.169.243.333.974.266.269.6
+ +Table 1: Results of VideoChat-R1 on various Video Benchmarks. * indicates that the model has suffered from overfitting and is unable to answer the question properly. Since the number of input pixels is fixed during our evaluation, the baseline results are slightly lower than those reported in their origin paper [1]. + +As shown in Table 1, after training with GRPO on spatio-temporal perception datasets, both VideoChat-R1 and VideoChat-R1-thinking significantly outperform the performance of Qwen2.5-VL and that of models fine-tuned through SFT for a single specific task across various spatiotemporal perception benchmarks and the general understanding benchmark VideoMME. This validates the effectiveness of our approach, which leverages multiple spatiotemporal perception datasets and RFT for enhancing spatiotemporal perception. + +Meanwhile, we observe that for spatio-temporal perception tasks, engaging in thinking processes does not necessarily lead to performance gains. However, for tasks such as QA and VideoMME, which may require complex reasoning, conducting inferences during testing can result in notable performance improvements. + +# 4.2 Ablation Studies and Discussions + +Muti-task Co-training. As shown in Table 2, we found that mixed training of different spatiotemporal perception tasks using GRPO can yield a synergistic improvement effect. Training with the multiple tasks achieves nearly the best results across all benchmarks. This reveals the potential of GRPO for larger-scale and multi-task collaborative training in the future. + +
MethodCharades-STAANetNExTGQAGoTVideoMME
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-Avg
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.3
+GRPO w/ STA59.370.446.030.727.512.931.461.227.812.972.6
+GRPO w/GQA36.033.515.524.920.610.735.168.736.126.772.0
+GRPO w/ GoT28.725.19.620.116.26.815.660.542.530.671.4
+GRPO w/ STA-GQA59.869.747.033.731.016.035.767.736.528.972.2
+GRPO w/ STA-GQA-GoT60.871.750.236.633.417.732.470.643.838.272.2
+ +Table 2: Ablation results of Cotraining on Spatio-Temporal Tasks. + +
MethodEpochsTraining PromptTest PromptCharades-STA (in domain)ActivityNet (out domain)VideoMME
ThinkAnswerThinkAnswermIoUR@0.3R@0.5R@0.7mIoUR@0.3R@0.5R@0.7Short-Avg
Vision Experts
FlashVTG [2]-------70.349.9-----
InternVideo2-6B [28]-------70.049.0-----
SG-DETR [8]-------71.152.8-----
MLLMs
Qwen2.5-VL-7B (baseline)---29.044.724.211.121.128.315.87.471.3
---28.141.823.411.117.722.713.47.771.3
+SFT146.363.945.025.320.630.216.77.9N/A*(-71.3)
334.6(+6.5)51.736.320.617.3(-3.8)26.110.03.9N/A*(-71.3)
+GRPO158.780.967.745.431.946.328.814.172.6
159.3(+31.2)81.770.446.030.7(+13.0)45.027.512.973.6(+2.3)
361.3(+33.2)83.172.851.534.3(+16.6)50.432.216.270.9(-0.4)
+ +Table 3: Ablation results of Temporal Grounding Task. * indicates that the model has suffered from overfitting and is unable to answer the question properly. + +
MethodGoTVideoMME
Average overlapR@0.5R@0.7Short-Avg
Qwen2.5-VL-7B12.61.1071.3
+SFT41.829.53.959.2
+GRPO42.5(+29.9)30.6(+29.5)3.9(+3.9)71.4(+0.1)
+ +Table 4: Ablation results of Object Tracking. We use 8 frames as input for training and evaluation. + +Temporal Grounding and Object tracking. As shown in Table 3 and Table 4, fine-tuning Qwen2.5-VL using GRPO significantly improves the performance of temporal grounding and object tracking tasks. Additionally, it slightly enhances the performance on the general understanding benchmark VideoMME. Even when training for more epochs, GRPO is less prone to overfitting compared to SFT. Instead, it can continuously improve the performance of temporal grounding, eventually surpassing the performance of previous expert models. Moreover, stimulating the model's thinking ability provides some benefits for both temporal grounding and VideoMME tasks. + +
MethodTraining PromptTest PromptNExTGQAVideoMME
ThinkAnswerGlueThinkAnswerGluemIoUaccShort-Avg
Direct Output
Qwen2.5-VL-7B (baseline)-41.771.3
15.459.5-
+SFT-65.160.2
28.2(+12.8)64.8(+5.3)60.1(-11.2)
+GRPO-70.171.7
16.270.271.7
35.1(+19.7)68.7(+9.2)72.0(+0.7)
Chain-of-thought Output
Qwen2.5-VL-7B-47.773.0
20.253.372.2
+GRPO-68.874.7
32.9(+12.7)66.9(+13.6)75.3(+3.1)
+ +Video Question Answer. As shown in Table 5, for the video question answering task, we selected the multi-choice QA task, which is easy to evaluate, for our experiments. Additionally, we explored the grounding QA task. In this task, when answering questions, the model is required to simultaneously provide the temporal cues on which its answers are based. Using merely a little over three thousand training data samples, we found that GRPO demonstrated remarkable fine-tuning capabilities. Not only did it lead to a substantial improvement in the performance of the NExTGQA task, but it also brought about a noticeable enhancement in the VideoMME task. We noticed that, unlike the previous strongly spatiotemporal perception tasks such as temporal grounding, thinking played a significant role in the QA task. Meanwhile, the glue signals also provided some assistance for relatively complex video understanding tasks like VideoMME. + +Table 5: Ablation results of Multi-Choice Video QA. + +
MethodDream-1kVidTAB-QA
F1PrecisionRecallAccuracy
Baseline30.633.827.970.7
+SFT31.432.630.271.7
+GRPO38.2(+7.6)45.4(+11.6)33.1(+5.2)72.6(+1.9)
+ +Table 6: Results of Video Caption and Video Quality Access. + +
MethodLLM JudgeDream-1k
F1PrecisionRecall
Baseline-30.633.827.9
+ GRPOGPT-3.5-turbo-0125 Qwen2.5-72B37.9(+7.3)44.4(+10.6)33.0(+5.1)
38.2(+7.6)45.4(+11.6)33.1(+5.2)
+ +Table 7: Ablation of Video Caption Task. + +Video Caption and Video Quality Assessment. For the Video Caption and Video Quality Assessment tasks, we found that GRPO still demonstrated its advantages over SFT, As shown in Table 6. The significant metric improvements on these two benchmarks demonstrate the effectiveness of our approach. + +Ablation of Reward Evaluators To assess the impact of different large language models (LLMs) as reward evaluators, we conducted parallel experiments using GPT-3.5-turbo-0125 and Qwen2.5-72B as distinct judges (Table 7). Models trained under both evaluators achieved nearly identical performance, demonstrating consistent caption reward generation across LLMs. We attribute this consistency to GRPO's fundamental mechanism: it relies on relative differential scoring within response groups rather than absolute reward values. This confirms that $R_{recall}$ produces discriminative reward signals for predicted captions independent of the choice of LLM judge, validating both the efficacy of our reward design and the stability of its signaling mechanism. Crucially, when guided by these reliable + +reward signals, our approach delivers substantial performance gains in description tasks using only limited high-quality data, demonstrating remarkable data-efficiency and significant optimization potential. + +
Model +Avg. DurationClue +PerceptionVideoMME +1010sLongVideoBench +473s
Qwen2.5-VL-7B64.456.0
63.3(-1.1)55.2(-0.8)
VideoChat-R1-thinking62.151.9
63.6(+1.5)58.2(+6.3)
+ +Table 8: Ablation results of "Clue-Perception". It should be noted that due to our adoption of a lower number of input pixels, the absolute performance is not entirely consistent with that reported for Qwen2.5-VL. + +Ablation of "Clue-Perception" As shown in Table. 8, we compared the performance changes of the model with and without perception enhancement when applying the "Clue-Perception" strategy on two representative long video benchmarks [6, 30]. It is noteworthy that without the use of "Clue-Perception", VideoChat-R1 showed no significant performance improvement over Qwen2.5-VL-7B in long video tasks, which can be attributed to the fact that our training dataset consists entirely of short videos under 1 minute. However, after the application of the "Clue-Perception" operation, VideoChat-R1 demonstrated a significant performance enhancement, indicating that the clues it provides are more accurate and thus revealing the potential of clue-perception in long video understanding. In contrast, due to its insufficient spatiotemporal perception capability, Qwen2.5-VL-7B even exhibited a performance decline after the implementation of the "Clue-Perception" operation. + +GRPO vs. SFT. It can be observed that across various types of tasks, GRPO outperforms SFT. Whether it is in terms of the performance on in-domain tasks, out-domain tasks, or the preservation of the original general performance, our experimental results demonstrate that GRPO is a promising fine-tuning approach. We will leave the large-scale comparison for future research. + +Chain-of-thought vs. Direct Output. Based on the video tasks and experiments we have explored, which focus on spatiotemporal perception, the output of the chain of thought has not demonstrated obvious advantages. In some cases, it is even inferior to the direct output. We believe that how to define appropriate video reasoning tasks and evaluation methods remains to be explored. 
The existing training data is insufficient to activate the model to output truly effective video reasoning chains. + +# Video: + +![](images/14772caaea820bce8abfff3fb25fe055a3d3f489303d7e859376b1f66919cc82.jpg) +Query: A person puts on a pair of shoes. + +![](images/dc2e7f5b87eda42235292244baad30099c0260240ad371faa2c3a5a56e9b7c19.jpg) +Figure 2: Examples on temporal grounding task. VideoChat-R1 gives a more accurate time interval after thinking. + +![](images/8b7f6b3d5a683c9e85bc2a6d1964ab2ba421b06778bf9d9a77bfa254bc48d01d.jpg) + +![](images/4c1eda96c7469788fc0f4eed659c9324e2340ecd108e41f3e47883d9abc2b1d2.jpg) + +![](images/d4d21b6c2f0500b2d1c3036da4a9e1bf736b322c6df991e43cddd7bcba3cce63.jpg) + +![](images/3becb2134d2fda7594ca6af18f1f2dab9032a5d21f1b230980bb26d2653d10ee.jpg) + +![](images/065345b3bee4b365c92af6880c7186140669d936b910a2cc42e2993ef88b5aea.jpg) + +![](images/eaad3848c3f2cbd11d2d60de002994c397acb95d6539b0e3b8f9c88788af4071.jpg) + +Ground Truth: + +1.8s< 9.4s + +SFT: + +1.3s←---->4.1s + +VideoChat-R1: + +2.0s 10.7s + +\nThe man is seen standing in front of a pantry, looking down at something on the floor. He then bends down and picks up a pair of shoes from the floor. He then proceeds to put on the shoes. The event $"a person puts on a pair of shoes"$ occurs during this time. $\backslash n < /$ think>\n< timestep>\nThe event happens in 2.00 - 10.7 seconds.\n\n< answer>\n2.00 to 10.67\n + +![](images/487489ae1c1cc58b30c1bf03f7b482e23ff422fab2c58178909c5e2e21ee197f.jpg) +Video: + +![](images/1ec648e71eaf0421911e1ef7520b793de89f5ab05562652eaaa335fc6fc413bc.jpg) +Figure 3: Examples on Video QA task. It can be seen that VideoChat-R1 can not only answer questions correctly but also provide relatively accurate reference time periods (clue). 
+ +![](images/9ff70423f7048b551160a4d558cb496609c136f669af2e980186a28f8919de18.jpg) + +![](images/4111b6c57103162103af289b7ae203f72e671c4a04ad2c215840b87e546c66ba.jpg) + +![](images/77431af26a587d74d78cf548fe934d2e4ac4879c9f8cd07d8ff92b3a6eaf09ee.jpg) + +![](images/348f3183f84c293ceb77e665d8f8e8125828222eb63fc005e492dbd23b5b10b7.jpg) + +Question: Why did the girl suddenly jump up from where she was standing? + +Ground Truth: Excited. + +[20.9s, 25.0s] + +E---> + +SFT: Dance. + +[19.4s, 37.2s] + +k-- + +---→ + +VideoChat-R1: Excited. + +[20.1s, 28.0s] + +<- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - + + The girl jumps up from her position after playing with the rocket toy and then walks around the room holding it. This suggests that she is excited about the toy and wants to show it off. The jump could be interpreted as an expression of excitement. + +E[(20.1,28.0)] + +# 4.3 Qualitative Results + +As shown in Figure 2 and 3, we provide visualizations of VideoChat-R1's outputs for the temporal grounding and video QA tasks. We observe that due to the enhanced spatiotemporal perception capability, VideoChat-R1 can more accurately locate video segments relevant to the question and conduct reasonable reasoning to answer the question. However, compared with the complex chains of thought involved in solving mathematical problems or coding tasks in text and image domains, the chains of thought generated in our current video tasks are relatively simple. We will explore more complex video reasoning tasks in future research. + +# 5 Conclusions + +In this work, we systematically investigate the role of reinforcement fine-tuning (RFT) with Group Relative Policy Optimization (GRPO) in enhancing video-centric multimodal large language models (MLLMs). 
Our experiments demonstrate that RFT is a highly data-efficient paradigm for task-specific improvements, enabling VideoChat-R1—a model trained with limited samples via multi-task RFT—to achieve state-of-the-art performance on spatio-temporal perception tasks while preserving general chat capabilities and exhibiting emergent spatiotemporal reasoning. We believe our work can present relevant insights for future research efforts in reinforcement learning of video MLLMs. + +# References + +[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025. +[2] Zhuo Cao, Bingqing Zhang, Heming Du, Xin Yu, Xue Li, and Sen Wang. Flashvtg: Feature layering and adaptive score handling network for video temporal grounding. arXiv preprint arXiv:2412.13441, 2024. +[3] Huilin Deng, Ding Zou, Rui Ma, Hongchen Luo, Yang Cao, and Yu Kang. Boosting the generalization and reasoning of vision language models with curriculum reinforcement learning. arXiv preprint arXiv:2503.07065, 2025. +[4] Yihe Deng, Hritik Bansal, Fan Yin, Nanyun Peng, Wei Wang, and Kai-Wei Chang. Openthinker: An early exploration to complex vision-language reasoning via iterative self-improvement. arXiv preprint arXiv:2503.17352, 2025. +[5] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. +[6] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. +[7] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. 
In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017. +[8] Aleksandr Gordeev, Vladimir Dokholyan, Irina Tolstykh, and Maksim Kuprashevich. Saliency-guided detr for moment retrieval and highlight detection. arXiv preprint arXiv:2410.01615, 2024. +[9] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +[10] Lianghua Huang, Xin Zhao, and Kaiqi Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. IEEE transactions on pattern analysis and machine intelligence, 43(5): 1562–1577, 2019. +[11] Zhenpeng Huang, Xinhao Li, Jiaqi Li, Jing Wang, Xiangyu Zeng, Cheng Liang, Tao Wu, Xi Chen, Liang Li, and Limin Wang. Online video understanding: A comprehensive benchmark and memory-augmented method. arXiv preprint arXiv:2501.00584, 2024. +[12] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024. +[13] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017. +[14] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023. +[15] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mybench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. +[16] Xinhao Li, Zhenpeng Huang, Jing Wang, Kunchang Li, and Limin Wang. 
Videoeval: Comprehensive benchmark suite for low-cost evaluation of video foundation model. arXiv preprint arXiv:2407.06491, 2024. +[17] Xinhao Li, Yi Wang, Jiashuo Yu, Xiangyu Zeng, Yuhan Zhu, Haian Huang, Jianfei Gao, Kunchang Li, Yinan He, Chenting Wang, et al. Videochat-flash: Hierarchical compression for long-context video modeling. arXiv preprint arXiv:2501.00574, 2024. +[18] Yuqi Liu, Bohao Peng, Zhisheng Zhong, Zihao Yue, Fanbin Lu, Bei Yu, and Jiaya Jia. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. arXiv preprint arXiv:2503.06520, 2025. +[19] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. + +[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023. +[21] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36: 42748-42761, 2023. +[22] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based r1. arXiv preprint arXiv:2503.07536, 2025. +[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. +[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. 
arXiv preprint arXiv:2501.12599, 2025. +[25] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024. +[26] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. +[27] Ye Wang, Boshen Xu, Zihao Yue, Zihan Xiao, Ziheng Wang, Liang Zhang, Dingyi Yang, Wenxuan Wang, and Qin Jin. Timezero: Temporal video grounding with reasoning-guided lvm. arXiv preprint arXiv:2503.13377, 2025. +[28] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, et al. Internvideo2: Scaling foundation models for multimodal video understanding. In European Conference on Computer Vision, pages 396-416. Springer, 2024. +[29] Yi Wang, Xinhao Li, Ziang Yan, Yinan He, Jiashuo Yu, Xiangyu Zeng, Chenting Wang, Changlian Ma, Haian Huang, Jianfei Gao, et al. Internvideo2.5: Empowering video mllms with long and rich context modeling. arXiv preprint arXiv:2501.12386, 2025. +[30] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37: 28828-28857, 2024. +[31] Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13204-13214, 2024. +[32] Yifan Xu, Xinhao Li, Yichun Yang, Rui Huang, and Limin Wang. Fine-grained video-text retrieval: A new benchmark and method. arXiv preprint arXiv:2501.00513, 2024. +[33] Ziang Yan, Zhilin Li, Yinan He, Chenting Wang, Kunchang Li, Xinhao Li, Xiangyu Zeng, Zilei Wang, Yali Wang, Yu Qiao, et al. 
Task preference optimization: Improving multimodal large language models with vision task alignment. arXiv preprint arXiv:2412.19326, 2024. +[34] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. +[35] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, et al. R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025. +[36] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. In European Conference on Computer Vision, pages 425-443. Springer, 2024. +[37] Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, et al. Timesuite: Improving mllms for long video understanding via grounded tuning. arXiv preprint arXiv:2410.19702, 2024. + +[38] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025. +[39] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025. +[40] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. R1-vl: Learning to reason with multimodal large language models via step-wise group relative policy optimization. arXiv preprint arXiv:2503.12937, 2025. +[41] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. 
Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. +[42] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025. +[43] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's" aha moment" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. \ No newline at end of file diff --git a/data/2025/2504_06xxx/2504.06958/images/065345b3bee4b365c92af6880c7186140669d936b910a2cc42e2993ef88b5aea.jpg b/data/2025/2504_06xxx/2504.06958/images/065345b3bee4b365c92af6880c7186140669d936b910a2cc42e2993ef88b5aea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac9e345690e97cbafb36d32585a65344bd738d30 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/065345b3bee4b365c92af6880c7186140669d936b910a2cc42e2993ef88b5aea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed121cfd10d73e2240e4bc43dd075f28eae55fd435cb0bbffb7f7839324d3102 +size 4868 diff --git a/data/2025/2504_06xxx/2504.06958/images/14772caaea820bce8abfff3fb25fe055a3d3f489303d7e859376b1f66919cc82.jpg b/data/2025/2504_06xxx/2504.06958/images/14772caaea820bce8abfff3fb25fe055a3d3f489303d7e859376b1f66919cc82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3efdada16f9e5915b4195965016e56df04aaeb99 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/14772caaea820bce8abfff3fb25fe055a3d3f489303d7e859376b1f66919cc82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1d457023af4b5cc2941ff6e288ec1e94087417f7927989a3036a3932c2138c2f +size 4612 diff --git a/data/2025/2504_06xxx/2504.06958/images/1ec648e71eaf0421911e1ef7520b793de89f5ab05562652eaaa335fc6fc413bc.jpg b/data/2025/2504_06xxx/2504.06958/images/1ec648e71eaf0421911e1ef7520b793de89f5ab05562652eaaa335fc6fc413bc.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..82cbc3e16f05c44ab5127054ee5eac763e69b484 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/1ec648e71eaf0421911e1ef7520b793de89f5ab05562652eaaa335fc6fc413bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89658dbc26830d58451e8b3654cbed91316357b3ae1b57f9f2ebf960ed10a1b6 +size 3071 diff --git a/data/2025/2504_06xxx/2504.06958/images/23efda807d3631875492b128e1c6d43a6d1b3acfb866a8d220b7971a875b30c3.jpg b/data/2025/2504_06xxx/2504.06958/images/23efda807d3631875492b128e1c6d43a6d1b3acfb866a8d220b7971a875b30c3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6657650cc2a307b493540da2715376084a932930 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/23efda807d3631875492b128e1c6d43a6d1b3acfb866a8d220b7971a875b30c3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:598a5859d5bb5dbed6fe1df469c055a8860f44e75005aaf01722b73a59549e63 +size 62260 diff --git a/data/2025/2504_06xxx/2504.06958/images/348f3183f84c293ceb77e665d8f8e8125828222eb63fc005e492dbd23b5b10b7.jpg b/data/2025/2504_06xxx/2504.06958/images/348f3183f84c293ceb77e665d8f8e8125828222eb63fc005e492dbd23b5b10b7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb6c6b279ebf7eaf674a5fd9eab8b09e3d2dc0a0 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/348f3183f84c293ceb77e665d8f8e8125828222eb63fc005e492dbd23b5b10b7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:624cfa69831c96df43a38272e17012374498e0aca94950aa7d90641ae4803685 +size 3023 diff --git a/data/2025/2504_06xxx/2504.06958/images/3becb2134d2fda7594ca6af18f1f2dab9032a5d21f1b230980bb26d2653d10ee.jpg b/data/2025/2504_06xxx/2504.06958/images/3becb2134d2fda7594ca6af18f1f2dab9032a5d21f1b230980bb26d2653d10ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9d9a808d873e493ff9053bf7e5ca60b3a28fd4fe --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06958/images/3becb2134d2fda7594ca6af18f1f2dab9032a5d21f1b230980bb26d2653d10ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f285250922e833a650efac4597c198c1521be4799e007062882d6c3be036b6c3 +size 3911 diff --git a/data/2025/2504_06xxx/2504.06958/images/4111b6c57103162103af289b7ae203f72e671c4a04ad2c215840b87e546c66ba.jpg b/data/2025/2504_06xxx/2504.06958/images/4111b6c57103162103af289b7ae203f72e671c4a04ad2c215840b87e546c66ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ed49dafe50bc69beada6397ad9159176ce98d77 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/4111b6c57103162103af289b7ae203f72e671c4a04ad2c215840b87e546c66ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc6f3b9db44bf92c21846742594fbb350152891c423cf9c3dcfcdd1d05091ab6 +size 3357 diff --git a/data/2025/2504_06xxx/2504.06958/images/487489ae1c1cc58b30c1bf03f7b482e23ff422fab2c58178909c5e2e21ee197f.jpg b/data/2025/2504_06xxx/2504.06958/images/487489ae1c1cc58b30c1bf03f7b482e23ff422fab2c58178909c5e2e21ee197f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..73619a6e9b5658cc7569952d6c22d74f1a11b046 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/487489ae1c1cc58b30c1bf03f7b482e23ff422fab2c58178909c5e2e21ee197f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa62dd238c8a240c396ebd22c0080984d26890511836648c82e2bea782c5e4bb +size 3246 diff --git a/data/2025/2504_06xxx/2504.06958/images/49c8de9ab6aa449d0493ba544821c43902ec33037647e79973fb1de7deb18b9e.jpg b/data/2025/2504_06xxx/2504.06958/images/49c8de9ab6aa449d0493ba544821c43902ec33037647e79973fb1de7deb18b9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c011beea087d1e49aa32f79a113aa1ac781e375e --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/49c8de9ab6aa449d0493ba544821c43902ec33037647e79973fb1de7deb18b9e.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:6fb77fc0dcf9049f6b0576802885faa60c71a00a6c024270396552fa1b251d69 +size 6941 diff --git a/data/2025/2504_06xxx/2504.06958/images/4c1eda96c7469788fc0f4eed659c9324e2340ecd108e41f3e47883d9abc2b1d2.jpg b/data/2025/2504_06xxx/2504.06958/images/4c1eda96c7469788fc0f4eed659c9324e2340ecd108e41f3e47883d9abc2b1d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1fd7f719c82d658d28ccd465273f2ac56f637857 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/4c1eda96c7469788fc0f4eed659c9324e2340ecd108e41f3e47883d9abc2b1d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2e7bb595323a3878ba3d8d5fb5736ecf57d192f945c9e4af7f013fd5980bf8b8 +size 4591 diff --git a/data/2025/2504_06xxx/2504.06958/images/66de61c965e6f7125a4d0851a6daa88d02b59216d290423ca25063a4fad099db.jpg b/data/2025/2504_06xxx/2504.06958/images/66de61c965e6f7125a4d0851a6daa88d02b59216d290423ca25063a4fad099db.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c849132ad05371d1f11b4aa5b22320296945272 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/66de61c965e6f7125a4d0851a6daa88d02b59216d290423ca25063a4fad099db.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6892a91533572f96181b628d153c740dca89a4e24cb881dbca9b8a825e8bbe37 +size 54438 diff --git a/data/2025/2504_06xxx/2504.06958/images/6882c68a3a26b0712c0adff2c4095059248463c416caf7d765c85c49ea62ab41.jpg b/data/2025/2504_06xxx/2504.06958/images/6882c68a3a26b0712c0adff2c4095059248463c416caf7d765c85c49ea62ab41.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f060fc945b3f3547acc375c7db272eba81edb411 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/6882c68a3a26b0712c0adff2c4095059248463c416caf7d765c85c49ea62ab41.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a30b091f425d149cfb1d84380670b0029c203f98e3ae91113b2dd26808e32cff +size 81373 diff --git 
a/data/2025/2504_06xxx/2504.06958/images/72496d0ba4a70d63fdf98a9c30e1c1afa104d563fe58d28f0705277530954bb0.jpg b/data/2025/2504_06xxx/2504.06958/images/72496d0ba4a70d63fdf98a9c30e1c1afa104d563fe58d28f0705277530954bb0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e223f1d77caef418fd43fc44c60005512cfdf273 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/72496d0ba4a70d63fdf98a9c30e1c1afa104d563fe58d28f0705277530954bb0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ba0a0861aaef1d8120960ef3e3643f129862dd4ecb56bd7981fcb551e603cfc +size 6613 diff --git a/data/2025/2504_06xxx/2504.06958/images/77431af26a587d74d78cf548fe934d2e4ac4879c9f8cd07d8ff92b3a6eaf09ee.jpg b/data/2025/2504_06xxx/2504.06958/images/77431af26a587d74d78cf548fe934d2e4ac4879c9f8cd07d8ff92b3a6eaf09ee.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d61770ce18ec37d6cec3232de21cba0689244a6b --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/77431af26a587d74d78cf548fe934d2e4ac4879c9f8cd07d8ff92b3a6eaf09ee.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5ad41fff0d9af63ead80d6ff7995204d5cfa8196c8e193e23d61f994fda88f2f +size 2746 diff --git a/data/2025/2504_06xxx/2504.06958/images/8b7f6b3d5a683c9e85bc2a6d1964ab2ba421b06778bf9d9a77bfa254bc48d01d.jpg b/data/2025/2504_06xxx/2504.06958/images/8b7f6b3d5a683c9e85bc2a6d1964ab2ba421b06778bf9d9a77bfa254bc48d01d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..efd76897d28741d58af66d41943a2662d060abf3 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/8b7f6b3d5a683c9e85bc2a6d1964ab2ba421b06778bf9d9a77bfa254bc48d01d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c500aba91295b5e3a93ec9f69b491821632c794639df9700e80cbe599146faa4 +size 5141 diff --git a/data/2025/2504_06xxx/2504.06958/images/9157e90250119c00b063011b92192f98c3ffde61f85f248f6cd013fdfd7e36b4.jpg 
b/data/2025/2504_06xxx/2504.06958/images/9157e90250119c00b063011b92192f98c3ffde61f85f248f6cd013fdfd7e36b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c209801903d5de5138405f124f7878928348d6bf --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/9157e90250119c00b063011b92192f98c3ffde61f85f248f6cd013fdfd7e36b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:111bb29484d6f7aed1bc0af19fec67e4290fde3d00d6c2a716bce7c7b6c4b700 +size 56192 diff --git a/data/2025/2504_06xxx/2504.06958/images/9ff70423f7048b551160a4d558cb496609c136f669af2e980186a28f8919de18.jpg b/data/2025/2504_06xxx/2504.06958/images/9ff70423f7048b551160a4d558cb496609c136f669af2e980186a28f8919de18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..086b0883f98e7905363c69f0c41bf8b7b60bfe91 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/9ff70423f7048b551160a4d558cb496609c136f669af2e980186a28f8919de18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89e3fc7805a06957236d5cec331095aff481770a8dcf586382d9b5ddea27e5de +size 3242 diff --git a/data/2025/2504_06xxx/2504.06958/images/a2ffff3e525c5e82541212dbc219dab6bf6bfc109006d91ec7630a209db31dbb.jpg b/data/2025/2504_06xxx/2504.06958/images/a2ffff3e525c5e82541212dbc219dab6bf6bfc109006d91ec7630a209db31dbb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b8973868496163074f64c6efb6bb01b37aecfab --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/a2ffff3e525c5e82541212dbc219dab6bf6bfc109006d91ec7630a209db31dbb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4e336cbd2006f457bfb4fd8af8e7a58fd0aa1bf248161cab336f4aa0e7b6c2f +size 5698 diff --git a/data/2025/2504_06xxx/2504.06958/images/a5f2ff17d78ad3ac3b5b705326d9fff1a45eb05911e7490206d12ad88e7ed906.jpg b/data/2025/2504_06xxx/2504.06958/images/a5f2ff17d78ad3ac3b5b705326d9fff1a45eb05911e7490206d12ad88e7ed906.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..febe74744ff1d951e6f1ddb382201c1550db0c0f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/a5f2ff17d78ad3ac3b5b705326d9fff1a45eb05911e7490206d12ad88e7ed906.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9300e1aff59368949c2ce7276792d07b3396023c2442a50e7c48b945fa22f82e +size 4804 diff --git a/data/2025/2504_06xxx/2504.06958/images/b817657d6e54ca97c5f4318ea64a574570534ae3d5013321a235e1b68d612e10.jpg b/data/2025/2504_06xxx/2504.06958/images/b817657d6e54ca97c5f4318ea64a574570534ae3d5013321a235e1b68d612e10.jpg new file mode 100644 index 0000000000000000000000000000000000000000..78fcb360cac5663f34cd073f0a87db030c9eaebb --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/b817657d6e54ca97c5f4318ea64a574570534ae3d5013321a235e1b68d612e10.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c8598e0cb78bbffcec10770b353dc5e8ac843833ef445e2041fd655dbffccc94 +size 25518 diff --git a/data/2025/2504_06xxx/2504.06958/images/cc2f8bc7d7e5a4e75e02523ac34df5e291289f198558c63a1c2e62ec64dc125a.jpg b/data/2025/2504_06xxx/2504.06958/images/cc2f8bc7d7e5a4e75e02523ac34df5e291289f198558c63a1c2e62ec64dc125a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..34d9d0ef1f50b87e7fca0d921e08c15e429163c6 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/cc2f8bc7d7e5a4e75e02523ac34df5e291289f198558c63a1c2e62ec64dc125a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:77c66419eb492311a0a2d60617953f1cee6ce221bba859877f4fe459ab868963 +size 20306 diff --git a/data/2025/2504_06xxx/2504.06958/images/d4d21b6c2f0500b2d1c3036da4a9e1bf736b322c6df991e43cddd7bcba3cce63.jpg b/data/2025/2504_06xxx/2504.06958/images/d4d21b6c2f0500b2d1c3036da4a9e1bf736b322c6df991e43cddd7bcba3cce63.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3a44cebd38197fa55724ff63804c2e09bc983ac4 --- /dev/null +++ 
b/data/2025/2504_06xxx/2504.06958/images/d4d21b6c2f0500b2d1c3036da4a9e1bf736b322c6df991e43cddd7bcba3cce63.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c5d5f700bd04dfdc7ecf35f99496a6548e0939b76d12e3ca8bece4cab44fb8a9 +size 4275 diff --git a/data/2025/2504_06xxx/2504.06958/images/dc2e7f5b87eda42235292244baad30099c0260240ad371faa2c3a5a56e9b7c19.jpg b/data/2025/2504_06xxx/2504.06958/images/dc2e7f5b87eda42235292244baad30099c0260240ad371faa2c3a5a56e9b7c19.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9ebb874dc13aedbefe74786873e3eabc6450d30c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/dc2e7f5b87eda42235292244baad30099c0260240ad371faa2c3a5a56e9b7c19.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:000238462fa56a03d8df4ed9ac80ae6709a5f35262993de66c3c3db201d5adc6 +size 5034 diff --git a/data/2025/2504_06xxx/2504.06958/images/e28f9a49ff6de42978fa86862a63a8825cbaa5600ce4964dad48ddb70c43d31e.jpg b/data/2025/2504_06xxx/2504.06958/images/e28f9a49ff6de42978fa86862a63a8825cbaa5600ce4964dad48ddb70c43d31e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f4db07c63e8eedc49f8631991eb801a97de32bf --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/e28f9a49ff6de42978fa86862a63a8825cbaa5600ce4964dad48ddb70c43d31e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:247c040e7b84e52d8e21f82466e689413dbf9f24caa7cfddbad369691c306091 +size 25594 diff --git a/data/2025/2504_06xxx/2504.06958/images/eaad3848c3f2cbd11d2d60de002994c397acb95d6539b0e3b8f9c88788af4071.jpg b/data/2025/2504_06xxx/2504.06958/images/eaad3848c3f2cbd11d2d60de002994c397acb95d6539b0e3b8f9c88788af4071.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bceda15e51f385d2624d751585354a5d63464683 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/eaad3848c3f2cbd11d2d60de002994c397acb95d6539b0e3b8f9c88788af4071.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:36fb71e3f33c13666b94e2916fdae8bc8903bc13b65bf658e06ce41c1aef292c +size 4420 diff --git a/data/2025/2504_06xxx/2504.06958/images/ebb1f5b7ddf5b410b150201df6d2a256266c29fa0069c30730645a948e7e5874.jpg b/data/2025/2504_06xxx/2504.06958/images/ebb1f5b7ddf5b410b150201df6d2a256266c29fa0069c30730645a948e7e5874.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9a24d4a6c561a88e9c1fc329933d0f7a2ba0171c --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/ebb1f5b7ddf5b410b150201df6d2a256266c29fa0069c30730645a948e7e5874.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4808d80e9bd07549448927b861073f2c4bd170b41f0a23ac758a0ba7991adbe5 +size 53527 diff --git a/data/2025/2504_06xxx/2504.06958/images/efe32ffe43d9a4b8ff44e4baa15a315fb0a957be5891583cb0c9eec3748be70f.jpg b/data/2025/2504_06xxx/2504.06958/images/efe32ffe43d9a4b8ff44e4baa15a315fb0a957be5891583cb0c9eec3748be70f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8e912aef91b8e6122044b1f1b91d98a7d46fb97 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/efe32ffe43d9a4b8ff44e4baa15a315fb0a957be5891583cb0c9eec3748be70f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44dbe8835675cffacbbadf53b0387a1b925031c23e53ccf4b568a15464226b6f +size 9920 diff --git a/data/2025/2504_06xxx/2504.06958/images/f09d73a22ceb8cfbd8381414bc174876dd7505bc28c0d0c76d6535620480222f.jpg b/data/2025/2504_06xxx/2504.06958/images/f09d73a22ceb8cfbd8381414bc174876dd7505bc28c0d0c76d6535620480222f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b3d7f127596f0bb7b9ce929559b4127feee88f7f --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/images/f09d73a22ceb8cfbd8381414bc174876dd7505bc28c0d0c76d6535620480222f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3341a203cfa42dc92f425e19a06dbf2e823aefe30d24225a9ea0e69e7bb0900f +size 31563 diff --git 
a/data/2025/2504_06xxx/2504.06958/layout.json b/data/2025/2504_06xxx/2504.06958/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..17a79a63b9a290f491ecd8b6516548034ae09809 --- /dev/null +++ b/data/2025/2504_06xxx/2504.06958/layout.json @@ -0,0 +1,7862 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 142, + 97, + 470, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 97, + 470, + 138 + ], + "spans": [ + { + "bbox": [ + 142, + 97, + 470, + 138 + ], + "type": "text", + "content": "VideoChat-R1: Enhancing Spatio-Temporal Perception via Reinforcement Fine-Tuning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "spans": [ + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": "Xinhao Li" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{2,1*}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Ziang Yan" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{3,1*}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Desen Meng" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Lu Dong" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{4,1}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Xiangyu Zeng" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{2,1}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Yinan He" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": 
"inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Yali Wang" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{6,1}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Yu Qiao" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{1,6}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Yi Wang" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{\\dagger 1,5}" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "text", + "content": ", Limin Wang" + }, + { + "bbox": [ + 120, + 177, + 489, + 202 + ], + "type": "inline_equation", + "content": "^{\\dagger 2,1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "spans": [ + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "text", + "content": "Shanghai AI Laboratory " + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "text", + "content": "Nanjing University " + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "text", + "content": "Zhejiang University " + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "text", + "content": "University of Science and Technology of China " + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "inline_equation", + 
"content": "^{5}" + }, + { + "bbox": [ + 157, + 202, + 453, + 224 + ], + "type": "text", + "content": "Shanghai Innovation Institute" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 164, + 224, + 446, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 164, + 224, + 446, + 245 + ], + "spans": [ + { + "bbox": [ + 164, + 224, + 446, + 245 + ], + "type": "text", + "content": "Shenzhen Institutes of Advanced Technology, Chinese Academy of Sciences https://github.com/OpenGVLab/VideoChat-R1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 281, + 274, + 329, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 274, + 329, + 286 + ], + "spans": [ + { + "bbox": [ + 281, + 274, + 329, + 286 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 298, + 470, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 298, + 470, + 474 + ], + "spans": [ + { + "bbox": [ + 140, + 298, + 470, + 474 + ], + "type": "text", + "content": "Reinforcement Learning (RL) benefits Large Language Models (LLMs) for complex reasoning. Inspired by this, we explore integrating spatio-temporal specific rewards into Multimodal Large Language Models (MLLMs) to address the unique challenges of video understanding, such as long-range temporal associations. This paper investigates how rule-based rewards, particularly temporal ones, can improve video reasoning and their generalizability. Our study proposes Reinforcement Fine-Tuning (RFT) as a data-efficient method to enhance video reasoning on specific tasks without sacrificing original capabilities. Through joint RFT on multiple spatio-temporal perception tasks, we developed VideoChat-R1, a powerful Video MLLM. 
VideoChat-R1 achieves state-of-the-art spatio-temporal perception, demonstrating significant improvements in tasks like temporal grounding (+31.8) and object tracking (+31.2), while also improving general QA benchmarks. The enhanced perception and preserved chat abilities contribute to a more reliable video dialogue system, leading to our \"Temporal Clue-driven Reasoning\" inference schema. This work provides a foundation for developing robust, real-world video comprehension agents." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 516, + 506, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 506, + 595 + ], + "type": "text", + "content": "The integration of reinforcement learning (RL) has notably propelled the capabilities of large language models (LLMs) forward, particularly in complex reasoning. Studies, such as o-series from OpenAI [12] and R1 of DeepSeek [9], demonstrated that test-time scaling improves model reasoning and such reasoning is elicit-able via only rule-based rewarding. Inspired by these advances, community is interested in tuning multimodal large language models (MLLMs) with vision-related verifications in Group Relative Policy Optimization (GRPO) [9] manner [43, 18, 38, 3, 22, 19, 35, 40, 39, 4]. They primarily consider visual mathematical reasoning and spatial localization." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 598, + 506, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 506, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 506, + 698 + ], + "type": "text", + "content": "Regarding video understanding, reasoning is also crucial as some of its core abilities (e.g. its long-range temporal association, fine-grained spatiotemporal understanding based on user queries) are barely addressed via perception or simple analysis. Initial works [42, 27, 5] have validated the superiority of the GRPO algorithm over supervised fine-tuning in some specific video tasks, such as temporal grounding and video question answer. Considering advancing the agent development with real-world video comprehension, we need to investigate and integrate spatio-temporal specific rewards or verification mechanisms directly into the learning of current MLLMs in a scalable manner. This could foster a more tightly coupled, near-closed-loop learning environment, enabling MLLMs to master intricate spatio-temporal skills. While our current work may not fully achieve this ambitious" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 217, + 35, + 572 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 217, + 35, + 572 + ], + "spans": [ + { + "bbox": [ + 14, + 217, + 35, + 572 + ], + "type": "text", + "content": "arXiv:2504.06958v5 [cs.CV] 11 Nov 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 121, + 703, + 293, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 703, + 293, + 715 + ], + "spans": [ + { + "bbox": [ + 121, + 703, + 293, + 715 + ], + "type": "text", + "content": "* Equal contribution. † Corresponding authors." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 74, + 500, + 230 + ], + "blocks": [ + { + "bbox": [ + 111, + 74, + 500, + 230 + ], + "lines": [ + { + "bbox": [ + 111, + 74, + 500, + 230 + ], + "spans": [ + { + "bbox": [ + 111, + 74, + 500, + 230 + ], + "type": "image", + "image_path": "6882c68a3a26b0712c0adff2c4095059248463c416caf7d765c85c49ea62ab41.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 240, + 504, + 275 + ], + "lines": [ + { + "bbox": [ + 104, + 240, + 504, + 275 + ], + "spans": [ + { + "bbox": [ + 104, + 240, + 504, + 275 + ], + "type": "text", + "content": "Figure 1: Overview of VideoChat-R1. Through reinforcement learning fine-tuning using GRPO, VideoChat-R1 has powerful spatio-temporal perception capabilities and can apply these capabilities in chatting scenarios." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 504, + 352 + ], + "type": "text", + "content": "goal, it endeavors to lay foundational groundwork for the community by exploring how to embed such mechanisms effectively. Specifically, we in this paper chart the landscape of how rule-based rewards (especially temporal related ones) work in video understanding, along with systematic evaluations of the its generalizability across diverse video-based reasoning scenarios." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 355, + 504, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 355, + 504, + 432 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 504, + 432 + ], + "type": "text", + "content": "Our study begins with spatio-temporal perception, extending to other popular video tasks (e.g. video caption), forming a practical recipe to enhance video reasoning via proper Reinforcement Fine-Tuning (RFT). We evaluate the robustness of various video verifications through different training and testing setting, also benchmarking their respective data requirements. Moreover, how these rewards interact with each other are also systematically explored. Based on these experiences, we give an intuitive and effective inference schema named \"Temporal Clue-driven Reasoning\". Generally, our main findings are as follows." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 451, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 132, + 451, + 504, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 451, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 132, + 451, + 504, + 507 + ], + "type": "text", + "content": "- Reinforcement fine-tuning is data-efficient for enhancing models on specific tasks without sacrificing original capabilities. With a small amount of data, training via RFT can yield a remarkable improvement in spatio-temporal perception ability, and there is negligible impact on the performance of out-domain tasks and the original general capabilities of the model, which outperforms traditional supervised fine-tuning significantly." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 132, + 521, + 506, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 521, + 506, + 620 + ], + "spans": [ + { + "bbox": [ + 132, + 521, + 506, + 620 + ], + "type": "text", + "content": "- Through joint reinforcement fine-tuning on multiple spatio-temporal perception tasks, we construct VideoChat-R1, a powerful Video MLLM that boasts state-of-the-art spatiotemporal perception capabilities while also taking into account chat abilities. We have also discovered that training on spatio-temporal perception tasks has slightly strengthened the model's spatio-temporal reasoning abilities. Compared with Qwen2.5-VL-7B, VideoChat-R1 achieves several times the performance improvement in spatiotemporal perception tasks such as temporal grounding (+31.8) and object track (+31.2). At the same time, it also achieves non-trivial improvements on general QA benchmarks, such as VideoMME (+0.9), MVBench (+1.0), and Perception Test (+0.9)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 132, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 132, + 634, + 506, + 723 + ], + "type": "text", + "content": "- The improvement of spatio-temporal perception ability and the preservation of the original chat capability can contribute to a more reliable and efficient video dialogue system. Our VideoChat-R1 can provide reference video segments when answering users' questions. Meanwhile, we propose to utilize these video segments for \"Temporal Clue-driven Reasoning\" to further obtain more accurate answers. Our experimental results reveal the potential of the approach that enhances the model's spatiotemporal perception ability through reinforcement learning for future research in the directions of reliable video dialogue systems and long video understanding." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 195, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 195, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 195, + 83 + ], + "type": "text", + "content": "2 Related work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 96, + 506, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 96, + 506, + 207 + ], + "spans": [ + { + "bbox": [ + 104, + 96, + 506, + 207 + ], + "type": "text", + "content": "Reasoning in MLLMs. The field LLMs has witnessed significant breakthroughs in reasoning, exemplified by recent works [12, 9]. These developments [23, 9, 24] have markedly improved LLMs' proficiency in tackling complex, multi-step tasks, such as challenging mathematical problems and coding exercises. Extending these RL techniques to multimodal large language models (MLLMs) has seen numerous efforts [43, 18, 38, 3, 22, 19, 35, 40, 39, 4] focused on leveraging verifiable reward mechanisms to boost visual reasoning performance. However, the application of RL-based strategies to spatio-temporal reasoning within the video domain remains comparatively less explored, with a few studies [27, 42, 5] investigating this area. [27] and [42] show GRPO's potential in temporal grounding and sentiment analysis, while, [5] demonstrates how GRPO can facilitate temporal reasoning." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 220, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 220, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 220, + 506, + 299 + ], + "type": "text", + "content": "Spatio-Temporal Understanding with MLLMs. Video understanding heavily relies on spatiotemporal perception. Progress in Video Multimodal Large Language Models (video MLLMs) [14, 20, 15, 28, 41, 29, 17, 11, 1] addresses video question answering and captioning in the unfed dialogue format, leveraging LLMs to organize predictions over visual perceptions. [36] and [37] leverage data augmentation for temporal modeling, yet compromising their general performance. [33] improves fine-grained spatio-temporal perception by task preference optimization with solid supervised fine-tuning using a lot of data." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 314, + 194, + 328 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 314, + 194, + 328 + ], + "spans": [ + { + "bbox": [ + 105, + 314, + 194, + 328 + ], + "type": "text", + "content": "3 Methodology" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 340, + 504, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 504, + 364 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 504, + 364 + ], + "type": "text", + "content": "We present how to exploit the spatio-temporal rewards to improve MLLMs in video domains using GRPO. Before that, we revisit the Group Relative Policy Optimization (GRPO) [23] first." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 377, + 182, + 390 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 377, + 182, + 390 + ], + "spans": [ + { + "bbox": [ + 105, + 377, + 182, + 390 + ], + "type": "text", + "content": "3.1 Preliminary" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "text", + "content": "Group Relative Policy Optimization (GRPO) [23] compares groups of candidates responses directly, independent on critic models. Regarding this, it significantly lowers training resources. Formally, with an input query " + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "text", + "content": ", GRPO initiates by generating a set of " + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "text", + "content": " distinct candidate responses, denoted as " + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "inline_equation", + "content": "o = \\{o_1, \\dots, o_G\\}" + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "text", + "content": ", via policy sampling. Each of these candidates is then assigned a score from a predefined reward function, yielding " + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "inline_equation", + "content": "R_1, \\dots, R_G" + }, + { + "bbox": [ + 104, + 399, + 505, + 465 + ], + "type": "text", + "content": ". 
To normalize these scores and ascertain the relative quality of each response, GRPO computes their mean and standard deviation:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 244, + 472, + 505, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 472, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 244, + 472, + 505, + 500 + ], + "type": "interline_equation", + "content": "A _ {i} = \\frac {R _ {i} - \\operatorname {m e a n} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}{\\operatorname {s t d} \\left(\\left\\{R _ {i} \\right\\} _ {i = 1} ^ {G}\\right)}. \\tag {1}", + "image_path": "49c8de9ab6aa449d0493ba544821c43902ec33037647e79973fb1de7deb18b9e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "content": "Here " + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "inline_equation", + "content": "A_{i}" + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "content": " quantifies the relative quality of the " + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "content": "-th candidate response within its group. The objective of GRPO is to encourage the model to prioritize and generate responses that achieve higher relative scores within such groups. 
Furthermore, the overall training objective incorporates a KL-divergence term " + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\mathrm{D}_{\\mathrm{KL}}(\\cdot |\\cdot)" + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "content": ", which serves to constrain the optimized policy " + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "content": " from diverging excessively from the original MLLM parameters " + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 104, + 506, + 504, + 562 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 577, + 358, + 589 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 577, + 358, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 577, + 358, + 589 + ], + "type": "text", + "content": "3.2 Spatio-Temporal Rewards of Video MLLM in GRPO" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 597, + 506, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 632 + ], + "type": "text", + "content": "We explore how to use GRPO to enhance the performance of Video MLLM in video-language understanding. We consider the five most common types of video related tasks: temporal grounding, object tracking, video question answering, captioning, and quality assessment in our experiments." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 644, + 504, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 644, + 504, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 504, + 689 + ], + "type": "text", + "content": "Format reward. 
To enable the model to output responses in the format we desire. For example, we expect the model to enclose its thought process with ... and the answer with ..., we designed a format reward " + }, + { + "bbox": [ + 104, + 644, + 504, + 689 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{format}}" + }, + { + "bbox": [ + 104, + 644, + 504, + 689 + ], + "type": "text", + "content": " for each task. We use regular expression matching to determine whether the model adheres to our specified format:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 205, + 698, + 505, + 724 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 205, + 698, + 505, + 724 + ], + "spans": [ + { + "bbox": [ + 205, + 698, + 505, + 724 + ], + "type": "interline_equation", + "content": "R _ {\\text {f o r m a t}} = \\left\\{ \\begin{array}{l l} 1, & \\text {i f o u t p u t m a t c h e s f o r m a t}, \\\\ 0, & \\text {i f o u t p u t d o e s n ’ t m a t c h f o r m a t}. \\end{array} \\right. \\tag {2}", + "image_path": "efe32ffe43d9a4b8ff44e4baa15a315fb0a957be5891583cb0c9eec3748be70f.jpg" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "IoU reward in spatio-temporal perception. 
For the spatio-temporal perception such as temporal grounding and object tracking, it requires the Video MLLM to output the time interval in the video that is associated with the content of a given textual query. Evidently, we can use the Intersection over Union (IoU) between the predicted interval by the model and the ground-truth interval as the reward function. This reward function effectively characterizes the accuracy of the interval predicted by the model." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 258, + 144, + 505, + 171 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 258, + 144, + 505, + 171 + ], + "spans": [ + { + "bbox": [ + 258, + 144, + 505, + 171 + ], + "type": "interline_equation", + "content": "R _ {\\mathrm {I o U}} = \\frac {\\left| \\mathcal {I} _ {\\text {p r e d}} \\cap \\mathcal {I} _ {\\mathrm {g t}} \\right|}{\\left| \\mathcal {I} _ {\\text {p r e d}} \\cup \\mathcal {I} _ {\\mathrm {g t}} \\right|}, \\tag {3}", + "image_path": "a2ffff3e525c5e82541212dbc219dab6bf6bfc109006d91ec7630a209db31dbb.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 179, + 506, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 179, + 506, + 202 + ], + "spans": [ + { + "bbox": [ + 104, + 179, + 506, + 202 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 179, + 506, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{pred}}" + }, + { + "bbox": [ + 104, + 179, + 506, + 202 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 179, + 506, + 202 + ], + "type": "inline_equation", + "content": "\\mathcal{I}_{\\mathrm{gt}}" + }, + { + "bbox": [ + 104, + 179, + 506, + 202 + ], + "type": "text", + "content": " are the predicted and the ground truth of time intervals or detection boxes, respectively." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 219, + 504, + 253 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 219, + 504, + 253 + ], + "spans": [ + { + "bbox": [ + 104, + 219, + 504, + 253 + ], + "type": "text", + "content": "Accuracy reward in classification. Discriminative tasks, such as multiple-choice video question answering and classification, aim to determine whether the model's prediction is consistent with the answer to the question. Therefore, we define:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 233, + 264, + 505, + 291 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 264, + 505, + 291 + ], + "spans": [ + { + "bbox": [ + 233, + 264, + 505, + 291 + ], + "type": "interline_equation", + "content": "R _ {\\text {a c c u a c y}} = \\left\\{ \\begin{array}{l l} 0, & \\text {i f} A _ {\\text {p r e d}} \\neq A _ {\\text {g t}} \\\\ 1, & \\text {i f} A _ {\\text {p r e d}} = A _ {\\text {g t}}, \\end{array} \\right. \\tag {4}", + "image_path": "72496d0ba4a70d63fdf98a9c30e1c1afa104d563fe58d28f0705277530954bb0.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 300, + 449, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 449, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 449, + 313 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 300, + 449, + 313 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{pred}}" + }, + { + "bbox": [ + 104, + 300, + 449, + 313 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 300, + 449, + 313 + ], + "type": "inline_equation", + "content": "A_{\\mathrm{gt}}" + }, + { + "bbox": [ + 104, + 300, + 449, + 313 + ], + "type": "text", + "content": " denote the predicted and the ground truth answers, respectively." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 329, + 504, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 329, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 104, + 329, + 504, + 428 + ], + "type": "text", + "content": "Recall reward in video captioning. For tasks like video captioning with open-ended outputs, it is impossible to simply compare and determine the gap between the generated caption and the ground truth caption. Therefore, we use a LLM as a \"judge\" to provide a reward score. In order to reduce the uncertainty of the evaluation criteria for the LLM, we first make the LLM decompose the ground truth and predicted captions into events list. Specifically, we utilize Qwen2.5-72B [34] to extract the events in the description and judge whether the events in a ground truth description can be entailed by the description predicted by the model. We calculate the event recall score as the ratio of events in a ground truth description that are entailed by the predicted description, and set different rewards according to the event recall score:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 234, + 439, + 505, + 452 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 234, + 439, + 505, + 452 + ], + "spans": [ + { + "bbox": [ + 234, + 439, + 505, + 452 + ], + "type": "interline_equation", + "content": "R _ {\\text {r e c a l l}} = \\operatorname {R e c a l l} _ {\\text {e v e n t}} \\left(C _ {\\text {p r e d}}, C _ {\\text {g t}}\\right), \\tag {5}", + "image_path": "a5f2ff17d78ad3ac3b5b705326d9fff1a45eb05911e7490206d12ad88e7ed906.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 461, + 459, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 461, + 459, + 474 + ], + "spans": [ + { + "bbox": [ + 104, + 461, + 459, + 474 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 461, + 459, + 474 + ], + "type": 
"inline_equation", + "content": "C_{\\mathrm{pred}}" + }, + { + "bbox": [ + 104, + 461, + 459, + 474 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 461, + 459, + 474 + ], + "type": "inline_equation", + "content": "C_{\\mathrm{gt}}" + }, + { + "bbox": [ + 104, + 461, + 459, + 474 + ], + "type": "text", + "content": " represent the predicted and the ground truth captions, respectively." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 478, + 506, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 506, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 506, + 500 + ], + "type": "text", + "content": "By combining the above reward functions, we explored how to utilize GRPO to enhance the performance of Video MLLM in various tasks. The specific details can be found in the Section 4." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 518, + 430, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 518, + 430, + 530 + ], + "spans": [ + { + "bbox": [ + 104, + 518, + 430, + 530 + ], + "type": "text", + "content": "3.3 Enhance Spatio-Temporal Perception of Video MLLM through GRPO" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "text", + "content": "The combination of reward functions. We adopt different combinations of reward functions for training in different tasks. Specifically, for the temporal grounding and object tracking task, " + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{st}} = R_{\\mathrm{format}} + R_{\\mathrm{IoU}}" + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "text", + "content": ". 
For the multi-choice QA and video quality assessment, " + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{qa}} = R_{\\mathrm{format}} + R_{\\mathrm{accuracy}}" + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "text", + "content": ". For the multi-choice QA with glue (e.g. Grounding QA), " + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{gqa}} = R_{\\mathrm{format}} + R_{\\mathrm{IoU}} + R_{\\mathrm{Acc}}" + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "text", + "content": ". For the video caption, " + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "inline_equation", + "content": "R_{\\mathrm{cap}} = R_{\\mathrm{format}} + R_{\\mathrm{Caption}}" + }, + { + "bbox": [ + 104, + 540, + 506, + 597 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": "VideoChat-R1-18k. We collect diverse video corpus from existing public video datasets for reinforcement learning training to enhance the model's spatiotemporal perception ability. For the temporal grounding task, we use the training set of Charade - STA [7] (5,338 samples) for training. For the object tracking task, training is conducted on the GoT - 10k [10] dataset, which has 9,335 samples. For the QA and grounding QA tasks, the validation set of NExTGQA [31] (3,358 samples) is used for training. For video captioning, FIBER-1k [32] (1,000 samples) is adopted for training. For video quality assessment, we use the quality assessment task from VidTAB [16] under the 100-shot setting, with 200 samples for training. 
Finally, for the training of VideoChat-R1, we perform joint training on three spatio-temporal perception-related tasks: temporal grounding, object tracking, and grounding QA. In total, 18,031 samples are used for training." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 89, + 267, + 100 + ], + "blocks": [ + { + "bbox": [ + 106, + 75, + 318, + 87 + ], + "lines": [ + { + "bbox": [ + 106, + 75, + 318, + 87 + ], + "spans": [ + { + "bbox": [ + 106, + 75, + 318, + 87 + ], + "type": "text", + "content": "Algorithm 1: VideoChat " + }, + { + "bbox": [ + 106, + 75, + 318, + 87 + ], + "type": "inline_equation", + "content": "\\mathcal{M}" + }, + { + "bbox": [ + 106, + 75, + 318, + 87 + ], + "type": "text", + "content": " with \"Clue-Perception\"" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 89, + 267, + 100 + ], + "lines": [ + { + "bbox": [ + 106, + 89, + 267, + 100 + ], + "spans": [ + { + "bbox": [ + 106, + 89, + 267, + 100 + ], + "type": "text", + "content": "Input: " + }, + { + "bbox": [ + 106, + 89, + 267, + 100 + ], + "type": "inline_equation", + "content": "V_{\\mathrm{low}}" + }, + { + "bbox": [ + 106, + 89, + 267, + 100 + ], + "type": "text", + "content": ": Low-resolution/low-fps vi" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 106, + 101, + 176, + 111 + ], + "blocks": [ + { + "bbox": [ + 106, + 101, + 176, + 111 + ], + "lines": [ + { + "bbox": [ + 106, + 101, + 176, + 
111 + ], + "spans": [ + { + "bbox": [ + 106, + 101, + 176, + 111 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 106, + 101, + 176, + 111 + ], + "type": "text", + "content": " :User question" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 106, + 111, + 223, + 121 + ], + "blocks": [ + { + "bbox": [ + 106, + 111, + 223, + 121 + ], + "lines": [ + { + "bbox": [ + 106, + 111, + 223, + 121 + ], + "spans": [ + { + "bbox": [ + 106, + 111, + 223, + 121 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathrm{res}}" + }, + { + "bbox": [ + 106, + 111, + 223, + 121 + ], + "type": "text", + "content": " : Resolution boost factor" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 106, + 122, + 223, + 133 + ], + "blocks": [ + { + "bbox": [ + 106, + 122, + 223, + 133 + ], + "lines": [ + { + "bbox": [ + 106, + 122, + 223, + 133 + ], + "spans": [ + { + "bbox": [ + 106, + 122, + 223, + 133 + ], + "type": "inline_equation", + "content": "\\Delta_{\\mathrm{fps}}" + }, + { + "bbox": [ + 106, + 122, + 223, + 133 + ], + "type": "text", + "content": " : Frame rate boost factor" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "latex" + }, + { + "type": "code", + "bbox": [ + 106, + 134, + 251, + 146 + ], + "blocks": [ + { + "bbox": [ + 106, + 134, + 251, + 146 + ], + "lines": [ + { + "bbox": [ + 106, + 134, + 251, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 134, + 251, + 146 + ], + "type": "text", + "content": "Output: " + }, + { + "bbox": [ + 106, + 134, + 251, + 146 + ], + "type": "inline_equation", + "content": "A^{(2)}" + }, + { + "bbox": [ + 106, + 134, + 251, + 146 + ], + "type": "text", + "content": ": Final refined 
answer" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 106, + 150, + 223, + 160 + ], + "blocks": [ + { + "bbox": [ + 106, + 150, + 223, + 160 + ], + "lines": [ + { + "bbox": [ + 106, + 150, + 223, + 160 + ], + "spans": [ + { + "bbox": [ + 106, + 150, + 223, + 160 + ], + "type": "text", + "content": "Initial Answer Generation:" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 106, + 160, + 206, + 173 + ], + "blocks": [ + { + "bbox": [ + 106, + 160, + 206, + 173 + ], + "lines": [ + { + "bbox": [ + 106, + 160, + 206, + 173 + ], + "spans": [ + { + "bbox": [ + 106, + 160, + 206, + 173 + ], + "type": "inline_equation", + "content": "(A^{(1)},\\mathcal{C})\\gets \\mathcal{M}(V_{\\mathrm{low}},Q)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "latex" + }, + { + "type": "code", + "bbox": [ + 106, + 176, + 244, + 188 + ], + "blocks": [ + { + "bbox": [ + 106, + 176, + 244, + 188 + ], + "lines": [ + { + "bbox": [ + 106, + 176, + 244, + 188 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 244, + 188 + ], + "type": "text", + "content": "Clue Processing & Upsampling:" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 121, + 188, + 251, + 198 + ], + "blocks": [ + { + "bbox": [ + 121, + 188, + 251, + 198 + ], + "lines": [ + { + "bbox": [ + 121, + 188, + 251, + 198 + ], + "spans": [ + { + "bbox": [ + 121, + 188, + 251, + 198 + ], + "type": "inline_equation", + "content": "\\mathcal{T}_c \\gets" + }, + { + "bbox": [ + 121, + 188, + 251, + 198 + ], + "type": "text", + "content": " ExtractTemporalClues " + }, + { + "bbox": [ + 121, 
+ 188, + 251, + 198 + ], + "type": "inline_equation", + "content": "(\\mathcal{C})" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 121, + 199, + 255, + 209 + ], + "blocks": [ + { + "bbox": [ + 121, + 199, + 255, + 209 + ], + "lines": [ + { + "bbox": [ + 121, + 199, + 255, + 209 + ], + "spans": [ + { + "bbox": [ + 121, + 199, + 255, + 209 + ], + "type": "inline_equation", + "content": "V_{\\mathrm{seg}} \\gets \\mathrm{SelectSegments}(V_{\\mathrm{low}}, \\mathcal{T}_c)" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 106, + 209, + 248, + 221 + ], + "blocks": [ + { + "bbox": [ + 106, + 209, + 248, + 221 + ], + "lines": [ + { + "bbox": [ + 106, + 209, + 248, + 221 + ], + "spans": [ + { + "bbox": [ + 106, + 209, + 248, + 221 + ], + "type": "inline_equation", + "content": "V_{\\mathrm{high}} \\gets" + }, + { + "bbox": [ + 106, + 209, + 248, + 221 + ], + "type": "text", + "content": " Upsample " + }, + { + "bbox": [ + 106, + 209, + 248, + 221 + ], + "type": "inline_equation", + "content": "(V_{\\mathrm{seg}}, \\Delta_{\\mathrm{res}}, \\Delta_{\\mathrm{fps}})" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "latex" + }, + { + "type": "code", + "bbox": [ + 106, + 224, + 219, + 235 + ], + "blocks": [ + { + "bbox": [ + 106, + 224, + 219, + 235 + ], + "lines": [ + { + "bbox": [ + 106, + 224, + 219, + 235 + ], + "spans": [ + { + "bbox": [ + 106, + 224, + 219, + 235 + ], + "type": "text", + "content": "Final Answer Generation:" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "code_body" + } + ], + "index": 12, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 106, + 235, + 192, + 247 + ], + "blocks": [ 
+ { + "bbox": [ + 106, + 235, + 192, + 247 + ], + "lines": [ + { + "bbox": [ + 106, + 235, + 192, + 247 + ], + "spans": [ + { + "bbox": [ + 106, + 235, + 192, + 247 + ], + "type": "inline_equation", + "content": "A^{(2)}\\gets \\mathcal{M}(V_{\\mathrm{high}},Q)" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "code", + "guess_lang": "latex" + }, + { + "type": "code", + "bbox": [ + 106, + 251, + 155, + 263 + ], + "blocks": [ + { + "bbox": [ + 106, + 251, + 155, + 263 + ], + "lines": [ + { + "bbox": [ + 106, + 251, + 155, + 263 + ], + "spans": [ + { + "bbox": [ + 106, + 251, + 155, + 263 + ], + "type": "text", + "content": "return " + }, + { + "bbox": [ + 106, + 251, + 155, + 263 + ], + "type": "inline_equation", + "content": "A^{(2)}" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "code_body" + } + ], + "index": 14, + "sub_type": "code", + "guess_lang": "lua" + }, + { + "bbox": [ + 104, + 281, + 506, + 381 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 281, + 506, + 381 + ], + "spans": [ + { + "bbox": [ + 104, + 281, + 506, + 381 + ], + "type": "text", + "content": "Chat with Enhanced Spatio-Temporal Perception. After enhancing the spatiotemporal perception capability of MLLMs, we can construct a more reliable video chat system. Specifically, after the model answers a user's question, it can provide relatively accurate clues that support the answer to that question. We can further leverage these clues to improve the accuracy of the responses. Herein, we propose a simple \"Temporal Clue-driven Reasoning\" schema: after the model gives the first answer, we re-input the video segments corresponding to the obtained clues into the model at a higher resolution and frame rate, prompting it to answer again. By perceiving more details, the model can generate more accurate responses. 
Additionally, this operation is also well-suited for long video understanding under conditions of limited computing resources." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 399, + 192, + 413 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 192, + 413 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 192, + 413 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 426, + 504, + 449 + ], + "type": "text", + "content": "Implementation details. The main experiments are all conducted based on Qwen2.5-VL-7B [1] (except for the video captioning, for which Qwen2-VL-7B [26] is used)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 462, + 506, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 506, + 540 + ], + "type": "text", + "content": "**Benchmarks.** We employ MVBench [15], Perception Test [21], VideoMME [6] for evaluation of general video understanding. Given that the majority of videos in our training set are short-length, we only use the short subset of VideoMME in testing. For the temporal grounding task, we use the test set of Charade-STA [7] for in-domain testing and the test set of ActivityNet-Grounding [13] as out-domain test data. For the object tracking task, testing is done using the GoT-10k [10] dataset. For the QA and grounding QA tasks, the test set of NExTGQA [31] is used for testing. And we use Dream-1k [25] and VidTAB-QA [16] for the video captioning and video quality access." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 105, + 555, + 251, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 251, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 251, + 567 + ], + "type": "text", + "content": "4.1 Evaluation of VideoChat-R1" + } + ] + } + ], + "index": 19 + }, + { + "type": "table", + "bbox": [ + 106, + 582, + 504, + 679 + ], + "blocks": [ + { + "bbox": [ + 106, + 582, + 504, + 679 + ], + "lines": [ + { + "bbox": [ + 106, + 582, + 504, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 582, + 504, + 679 + ], + "type": "table", + "html": "
MethodCharades-STAActivityNetNExTGQAGoTVideoMMEMVBenchPeception Test
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-AvgAvgVal
Baseline
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.366.969.1
SFT on specific tasks
+SFT w/ Charades-STA46.345.025.320.616.77.9----N/A*N/A*N/A*
+SFT w/ GoT--------41.829.559.258.658.5
+SFT w/ NExTGQA------28.264.8--60.159.260.7
GRPO on various tasks
VideoChat-R160.871.750.236.633.417.732.470.643.838.272.267.970.0
VideoChat-R1-thinking59.970.647.235.533.316.736.169.243.333.974.266.269.6
", + "image_path": "ebb1f5b7ddf5b410b150201df6d2a256266c29fa0069c30730645a948e7e5874.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_body" + } + ], + "index": 20 + }, + { + "bbox": [ + 104, + 680, + 504, + 725 + ], + "lines": [ + { + "bbox": [ + 104, + 680, + 504, + 725 + ], + "spans": [ + { + "bbox": [ + 104, + 680, + 504, + 725 + ], + "type": "text", + "content": "Table 1: Results of VideoChat-R1 on various Video Benchmarks. * indicates that the model has suffered from overfitting and is unable to answer the question properly. Since the number of input pixels is fixed during our evaluation, the baseline results are slightly lower than those reported in their origin paper [1]." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "As shown in Table 1, after training with GRPO on spatio-temporal perception datasets, both VideoChat-R1 and VideoChat-R1-thinking significantly outperform the performance of Qwen2.5-VL and that of models fine-tuned through SFT for a single specific task across various spatiotemporal perception benchmarks and the general understanding benchmark VideoMME. This validates the effectiveness of our approach, which leverages multiple spatiotemporal perception datasets and RFT for enhancing spatiotemporal perception." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 144, + 506, + 189 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 144, + 506, + 189 + ], + "spans": [ + { + "bbox": [ + 104, + 144, + 506, + 189 + ], + "type": "text", + "content": "Meanwhile, we observe that for spatio-temporal perception tasks, engaging in thinking processes does not necessarily lead to performance gains. However, for tasks such as QA and VideoMME, which may require complex reasoning, conducting inferences during testing can result in notable performance improvements." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 203, + 272, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 203, + 272, + 214 + ], + "spans": [ + { + "bbox": [ + 105, + 203, + 272, + 214 + ], + "type": "text", + "content": "4.2 Ablation Studies and Discussions" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 224, + 506, + 269 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 224, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 506, + 269 + ], + "type": "text", + "content": "Muti-task Co-training. As shown in Table 2, we found that mixed training of different spatiotemporal perception tasks using GRPO can yield a synergistic improvement effect. Training with the multiple tasks achieves nearly the best results across all benchmarks. This reveals the potential of GRPO for larger-scale and multi-task collaborative training in the future." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 110, + 281, + 504, + 362 + ], + "blocks": [ + { + "bbox": [ + 110, + 281, + 504, + 362 + ], + "lines": [ + { + "bbox": [ + 110, + 281, + 504, + 362 + ], + "spans": [ + { + "bbox": [ + 110, + 281, + 504, + 362 + ], + "type": "table", + "html": "
MethodCharades-STAANetNExTGQAGoTVideoMME
mIoUR@0.5R@0.7mIoUR@0.5R@0.7mIoUaccOverlapR@0.5Short-Avg
Qwen2.5-VL-7B29.024.211.121.115.87.515.459.512.61.171.3
+GRPO w/ STA59.370.446.030.727.512.931.461.227.812.972.6
+GRPO w/GQA36.033.515.524.920.610.735.168.736.126.772.0
+GRPO w/ GoT28.725.19.620.116.26.815.660.542.530.671.4
+GRPO w/ STA-GQA59.869.747.033.731.016.035.767.736.528.972.2
+GRPO w/ STA-GQA-GoT60.871.750.236.633.417.732.470.643.838.272.2
", + "image_path": "9157e90250119c00b063011b92192f98c3ffde61f85f248f6cd013fdfd7e36b4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 108, + 396, + 504, + 511 + ], + "blocks": [ + { + "bbox": [ + 163, + 364, + 446, + 375 + ], + "lines": [ + { + "bbox": [ + 163, + 364, + 446, + 375 + ], + "spans": [ + { + "bbox": [ + 163, + 364, + 446, + 375 + ], + "type": "text", + "content": "Table 2: Ablation results of Cotraining on Spatio-Temporal Tasks." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 108, + 396, + 504, + 511 + ], + "lines": [ + { + "bbox": [ + 108, + 396, + 504, + 511 + ], + "spans": [ + { + "bbox": [ + 108, + 396, + 504, + 511 + ], + "type": "table", + "html": "
MethodEpochsTraining PromptTest PromptCharades-STA (in domain)ActivityNet (out domain)VideoMME
ThinkAnswerThinkAnswermIoUR@0.3R@0.5R@0.7mIoUR@0.3R@0.5R@0.7Short-Avg
Vision Experts
FlashVTG [2]-------70.349.9-----
InternVideo2-6B [28]-------70.049.0-----
SG-DETR [8]--71.152.8-----
MLLMs
Qwen2.5-VL-7B (baseline)---29.044.724.211.121.128.315.87.471.3
---28.141.823.411.117.722.713.47.771.3
+SFT146.363.945.025.320.630.216.77.9N/A*(-71.3)
334.6(+6.5)51.736.320.617.3(-3.8)26.110.03.9N/A*(-71.3)
+GRPO158.780.967.745.431.946.328.814.172.6
159.3(+31.2)81.770.446.030.7(+13.0)45.027.512.973.6(+2.3)
361.3(+33.2)83.172.851.534.3(+16.6)50.432.216.270.9(-0.4)
", + "image_path": "23efda807d3631875492b128e1c6d43a6d1b3acfb866a8d220b7971a875b30c3.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 185, + 551, + 426, + 608 + ], + "blocks": [ + { + "bbox": [ + 104, + 513, + 504, + 536 + ], + "lines": [ + { + "bbox": [ + 104, + 513, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 504, + 536 + ], + "type": "text", + "content": "Table 3: Ablation results of Temporal Grounding Task. * indicates that the model has suffered from overfitting and is unable to answer the question properly." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 185, + 551, + 426, + 608 + ], + "lines": [ + { + "bbox": [ + 185, + 551, + 426, + 608 + ], + "spans": [ + { + "bbox": [ + 185, + 551, + 426, + 608 + ], + "type": "table", + "html": "
MethodGoTVideoMME
Average overlapR@0.5R@0.7Short-Avg
Qwen2.5-VL-7B12.61.1071.3
+SFT41.829.53.959.2
+GRPO42.5(+29.9)30.6(+29.5)3.9(+3.9)71.4(+0.1)
", + "image_path": "cc2f8bc7d7e5a4e75e02523ac34df5e291289f198558c63a1c2e62ec64dc125a.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 609, + 504, + 621 + ], + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 621 + ], + "type": "text", + "content": "Table 4: Ablation results of Object Tracking. We use 8 frames as input for training and evaluation." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "Temporal Grounding and Object tracking. As shown in Table 3 and Table 4, fine-tuning Qwen2.5-VL using GRPO significantly improves the performance of temporal grounding and object tracking tasks. Additionally, it slightly enhances the performance on the general understanding benchmark VideoMME. Even when training for more epochs, GRPO is less prone to overfitting compared to SFT. Instead, it can continuously improve the performance of temporal grounding, eventually surpassing the performance of previous expert models. Moreover, stimulating the model's thinking ability provides some benefits for both temporal grounding and VideoMME tasks." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 149, + 70, + 465, + 220 + ], + "blocks": [ + { + "bbox": [ + 149, + 70, + 465, + 220 + ], + "lines": [ + { + "bbox": [ + 149, + 70, + 465, + 220 + ], + "spans": [ + { + "bbox": [ + 149, + 70, + 465, + 220 + ], + "type": "table", + "html": "
MethodTraining PromptTest PromptNExTGQAVideoMME
ThinkAnswerGlueThinkAnswerGluemIoUaccShort-Avg
Direct Output
Qwen2.5-VL-7B (baseline)-41.771.3
15.459.5-
+SFT-65.160.2
28.2(+12.8)64.8(+5.3)60.1(-11.2)
+GRPO-70.171.7
16.270.271.7
35.1(+19.7)68.7(+9.2)72.0(+0.7)
Chain-of-thought Output
Qwen2.5-VL-7B-47.773.0
20.253.372.2
+GRPO-68.874.7
32.9(+12.7)66.9(+13.6)75.3(+3.1)
", + "image_path": "66de61c965e6f7125a4d0851a6daa88d02b59216d290423ca25063a4fad099db.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 248, + 506, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 248, + 506, + 358 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 506, + 358 + ], + "type": "text", + "content": "Video Question Answer. As shown in Table 5, for the video question answering task, we selected the multi-choice QA task, which is easy to evaluate, for our experiments. Additionally, we explored the grounding QA task. In this task, when answering questions, the model is required to simultaneously provide the temporal cues on which its answers are based. Using merely a little over three thousand training data samples, we found that GRPO demonstrated remarkable fine-tuning capabilities. Not only did it lead to a substantial improvement in the performance of the NExTGQA task, but it also brought about a noticeable enhancement in the VideoMME task. We noticed that, unlike the previous strongly spatiotemporal perception tasks such as temporal grounding, thinking played a significant role in the QA task. Meanwhile, the glue signals also provided some assistance for relatively complex video understanding tasks like VideoMME." + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 189, + 369, + 426, + 442 + ], + "blocks": [ + { + "bbox": [ + 193, + 221, + 416, + 232 + ], + "lines": [ + { + "bbox": [ + 193, + 221, + 416, + 232 + ], + "spans": [ + { + "bbox": [ + 193, + 221, + 416, + 232 + ], + "type": "text", + "content": "Table 5: Ablation results of Multi-Choice Video QA." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 189, + 369, + 426, + 442 + ], + "lines": [ + { + "bbox": [ + 189, + 369, + 426, + 442 + ], + "spans": [ + { + "bbox": [ + 189, + 369, + 426, + 442 + ], + "type": "table", + "html": "
MethodDream-1kVidTAB-QA
F1PrecisionRecallAccuracy
Baseline30.633.827.970.7
+SFT31.432.630.271.7
+GRPO38.2(+7.6)45.4(+11.6)33.1(+5.2)72.6(+1.9)
", + "image_path": "b817657d6e54ca97c5f4318ea64a574570534ae3d5013321a235e1b68d612e10.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 189, + 475, + 426, + 544 + ], + "blocks": [ + { + "bbox": [ + 176, + 445, + 433, + 456 + ], + "lines": [ + { + "bbox": [ + 176, + 445, + 433, + 456 + ], + "spans": [ + { + "bbox": [ + 176, + 445, + 433, + 456 + ], + "type": "text", + "content": "Table 6: Results of Video Caption and Video Quality Access." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 189, + 475, + 426, + 544 + ], + "lines": [ + { + "bbox": [ + 189, + 475, + 426, + 544 + ], + "spans": [ + { + "bbox": [ + 189, + 475, + 426, + 544 + ], + "type": "table", + "html": "
MethodLLM JudgeDream-1k
F1PrecisionRecall
Baseline-30.633.827.9
+ GRPOGPT-3.5-turbo-0125 Qwen2.5-72B37.9(+7.3)44.4(+10.6)33(+5.1)
38.2(+7.6)45.4(+11.6)33.1(+5.2)
", + "image_path": "e28f9a49ff6de42978fa86862a63a8825cbaa5600ce4964dad48ddb70c43d31e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 217, + 546, + 392, + 558 + ], + "lines": [ + { + "bbox": [ + 217, + 546, + 392, + 558 + ], + "spans": [ + { + "bbox": [ + 217, + 546, + 392, + 558 + ], + "type": "text", + "content": "Table 7: Ablation of Video Caption Task." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 506, + 623 + ], + "type": "text", + "content": "Video Caption and Video Quality Assessment. For the Video Caption and Video Quality Assessment tasks, we found that GRPO still demonstrated its advantages over SFT, As shown in Table 6. The significant metric improvements on these two benchmarks demonstrate the effectiveness of our approach." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "Ablation of Reward Evaluators To assess the impact of different large language models (LLMs) as reward evaluators, we conducted parallel experiments using GPT-3.5-turbo-0125 and Qwen2.5-72B as distinct judges (Table 7). Models trained under both evaluators achieved nearly identical performance, demonstrating consistent caption reward generation across LLMs. We attribute this consistency to GRPO's fundamental mechanism: it relies on relative differential scoring within response groups rather than absolute reward values. 
This confirms that " + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "inline_equation", + "content": "R_{recall}" + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": " produces discriminative reward signals for predicted captions independent of the choice of LLM judge, validating both the efficacy of our reward design and the stability of its signaling mechanism. Crucially, when guided by these reliable" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "reward signals, our approach delivers substantial performance gains in description tasks using only limited high-quality data, demonstrating remarkable data-efficiency and significant optimization potential." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 156, + 118, + 455, + 201 + ], + "blocks": [ + { + "bbox": [ + 156, + 118, + 455, + 201 + ], + "lines": [ + { + "bbox": [ + 156, + 118, + 455, + 201 + ], + "spans": [ + { + "bbox": [ + 156, + 118, + 455, + 201 + ], + "type": "table", + "html": "
Model\nAvg. DurationClue\nPerceptionVideoMME\n1010sLongVideoBench\n473s
Qwen2.5-VL-7B64.456.0
63.3(-1.1)55.2(-0.8)
VideoChat-R1-thinking62.151.9
63.6(+1.5)58.2(+6.3)
", + "image_path": "f09d73a22ceb8cfbd8381414bc174876dd7505bc28c0d0c76d6535620480222f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 201, + 504, + 236 + ], + "lines": [ + { + "bbox": [ + 104, + 201, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 104, + 201, + 504, + 236 + ], + "type": "text", + "content": "Table 8: Ablation results of \"Clue-Perception\". It should be noted that due to our adoption of a lower number of input pixels, the absolute performance is not entirely consistent with that reported for Qwen2.5-VL." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 258, + 506, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 258, + 506, + 369 + ], + "spans": [ + { + "bbox": [ + 104, + 258, + 506, + 369 + ], + "type": "text", + "content": "Ablation of \"Clue-Perception\" As shown in Table. 8, we compared the performance changes of the model with and without perception enhancement when applying the \"Clue-Perception\" strategy on two representative long video benchmarks [6, 30]. It is noteworthy that without the use of \"Clue-Perception\", VideoChat-R1 showed no significant performance improvement over Qwen2.5-VL-7B in long video tasks, which can be attributed to the fact that our training dataset consists entirely of short videos under 1 minute. However, after the application of the \"Clue-Perception\" operation, VideoChat-R1 demonstrated a significant performance enhancement, indicating that the clues it provides are more accurate and thus revealing the potential of clue-perception in long video understanding. In contrast, due to its insufficient spatiotemporal perception capability, Qwen2.5-VL-7B even exhibited a performance decline after the implementation of the \"Clue-Perception\" operation." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 380, + 504, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 504, + 425 + ], + "type": "text", + "content": "GRPO vs. SFT. It can be observed that across various types of tasks, GRPO outperforms SFT. Whether it is in terms of the performance on in-domain tasks, out-domain tasks, or the preservation of the original general performance, our experimental results demonstrate that GRPO is a promising fine-tuning approach. We will leave the large-scale comparison for future research." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 437, + 504, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 437, + 504, + 493 + ], + "spans": [ + { + "bbox": [ + 104, + 437, + 504, + 493 + ], + "type": "text", + "content": "Chain-of-thought vs. Direct Output. Based on the video tasks and experiments we have explored, which focus on spatiotemporal perception, the output of the chain of thought has not demonstrated obvious advantages. In some cases, it is even inferior to the direct output. We believe that how to define appropriate video reasoning tasks and evaluation methods remains to be explored. The existing training data is insufficient to activate the model to output truly effective video reasoning chains." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 136, + 540, + 160, + 549 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 540, + 160, + 549 + ], + "spans": [ + { + "bbox": [ + 136, + 540, + 160, + 549 + ], + "type": "text", + "content": "Video:" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 164, + 512, + 199, + 577 + ], + "blocks": [ + { + "bbox": [ + 164, + 512, + 199, + 577 + ], + "lines": [ + { + "bbox": [ + 164, + 512, + 199, + 577 + ], + "spans": [ + { + "bbox": [ + 164, + 512, + 199, + 577 + ], + "type": "image", + "image_path": "14772caaea820bce8abfff3fb25fe055a3d3f489303d7e859376b1f66919cc82.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 136, + 578, + 270, + 588 + ], + "lines": [ + { + "bbox": [ + 136, + 578, + 270, + 588 + ], + "spans": [ + { + "bbox": [ + 136, + 578, + 270, + 588 + ], + "type": "text", + "content": "Query: A person puts on a pair of shoes." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 203, + 512, + 237, + 577 + ], + "blocks": [ + { + "bbox": [ + 203, + 512, + 237, + 577 + ], + "lines": [ + { + "bbox": [ + 203, + 512, + 237, + 577 + ], + "spans": [ + { + "bbox": [ + 203, + 512, + 237, + 577 + ], + "type": "image", + "image_path": "dc2e7f5b87eda42235292244baad30099c0260240ad371faa2c3a5a56e9b7c19.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 689, + 504, + 712 + ], + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 712 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 712 + ], + "type": "text", + "content": "Figure 2: Examples on temporal grounding task. VideoChat-R1 gives a more accurate time interval after thinking." 
+ } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 241, + 512, + 276, + 577 + ], + "blocks": [ + { + "bbox": [ + 241, + 512, + 276, + 577 + ], + "lines": [ + { + "bbox": [ + 241, + 512, + 276, + 577 + ], + "spans": [ + { + "bbox": [ + 241, + 512, + 276, + 577 + ], + "type": "image", + "image_path": "8b7f6b3d5a683c9e85bc2a6d1964ab2ba421b06778bf9d9a77bfa254bc48d01d.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 279, + 512, + 314, + 577 + ], + "blocks": [ + { + "bbox": [ + 279, + 512, + 314, + 577 + ], + "lines": [ + { + "bbox": [ + 279, + 512, + 314, + 577 + ], + "spans": [ + { + "bbox": [ + 279, + 512, + 314, + 577 + ], + "type": "image", + "image_path": "4c1eda96c7469788fc0f4eed659c9324e2340ecd108e41f3e47883d9abc2b1d2.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 318, + 512, + 352, + 577 + ], + "blocks": [ + { + "bbox": [ + 318, + 512, + 352, + 577 + ], + "lines": [ + { + "bbox": [ + 318, + 512, + 352, + 577 + ], + "spans": [ + { + "bbox": [ + 318, + 512, + 352, + 577 + ], + "type": "image", + "image_path": "d4d21b6c2f0500b2d1c3036da4a9e1bf736b322c6df991e43cddd7bcba3cce63.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 355, + 512, + 390, + 577 + ], + "blocks": [ + { + "bbox": [ + 355, + 512, + 390, + 577 + ], + "lines": [ + { + "bbox": [ + 355, + 512, + 390, + 577 + ], + "spans": [ + { + "bbox": [ + 355, + 512, + 390, + 577 + ], + "type": "image", + "image_path": "3becb2134d2fda7594ca6af18f1f2dab9032a5d21f1b230980bb26d2653d10ee.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 393, + 512, + 428, + 577 + ], + "blocks": [ + { + 
"bbox": [ + 393, + 512, + 428, + 577 + ], + "lines": [ + { + "bbox": [ + 393, + 512, + 428, + 577 + ], + "spans": [ + { + "bbox": [ + 393, + 512, + 428, + 577 + ], + "type": "image", + "image_path": "065345b3bee4b365c92af6880c7186140669d936b910a2cc42e2993ef88b5aea.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 431, + 512, + 466, + 577 + ], + "blocks": [ + { + "bbox": [ + 431, + 512, + 466, + 577 + ], + "lines": [ + { + "bbox": [ + 431, + 512, + 466, + 577 + ], + "spans": [ + { + "bbox": [ + 431, + 512, + 466, + 577 + ], + "type": "image", + "image_path": "eaad3848c3f2cbd11d2d60de002994c397acb95d6539b0e3b8f9c88788af4071.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 136, + 594, + 190, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 594, + 190, + 603 + ], + "spans": [ + { + "bbox": [ + 136, + 594, + 190, + 603 + ], + "type": "text", + "content": "Ground Truth:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 204, + 594, + 318, + 603 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 204, + 594, + 318, + 603 + ], + "spans": [ + { + "bbox": [ + 204, + 594, + 318, + 603 + ], + "type": "text", + "content": "1.8s< 9.4s" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 136, + 608, + 155, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 608, + 155, + 616 + ], + "spans": [ + { + "bbox": [ + 136, + 608, + 155, + 616 + ], + "type": "text", + "content": "SFT:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 167, + 608, + 248, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 608, + 248, + 616 + ], + "spans": [ + { + "bbox": [ + 167, + 608, + 248, + 616 + ], + "type": "text", + "content": "1.3s←---->4.1s" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 136, + 625, + 190, + 635 + ], + 
"type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 136, + 625, + 190, + 635 + ], + "spans": [ + { + "bbox": [ + 136, + 625, + 190, + 635 + ], + "type": "text", + "content": "VideoChat-R1:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 200, + 625, + 337, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 625, + 337, + 635 + ], + "spans": [ + { + "bbox": [ + 200, + 625, + 337, + 635 + ], + "type": "text", + "content": "2.0s 10.7s" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 135, + 636, + 460, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 135, + 636, + 460, + 675 + ], + "spans": [ + { + "bbox": [ + 135, + 636, + 460, + 675 + ], + "type": "text", + "content": "\\nThe man is seen standing in front of a pantry, looking down at something on the floor. He then bends down and picks up a pair of shoes from the floor. He then proceeds to put on the shoes. The event " + }, + { + "bbox": [ + 135, + 636, + 460, + 675 + ], + "type": "inline_equation", + "content": "\"a person puts on a pair of shoes\"" + }, + { + "bbox": [ + 135, + 636, + 460, + 675 + ], + "type": "text", + "content": " occurs during this time. 
" + }, + { + "bbox": [ + 135, + 636, + 460, + 675 + ], + "type": "inline_equation", + "content": "\\backslash n < /" + }, + { + "bbox": [ + 135, + 636, + 460, + 675 + ], + "type": "text", + "content": " think>\\n< timestep>\\nThe event happens in 2.00 - 10.7 seconds.\\n\\n< answer>\\n2.00 to 10.67\\n" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 164, + 79, + 209, + 113 + ], + "blocks": [ + { + "bbox": [ + 137, + 92, + 162, + 101 + ], + "lines": [ + { + "bbox": [ + 137, + 92, + 162, + 101 + ], + "spans": [ + { + "bbox": [ + 137, + 92, + 162, + 101 + ], + "type": "text", + "content": "Video:" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 164, + 79, + 209, + 113 + ], + "lines": [ + { + "bbox": [ + 164, + 79, + 209, + 113 + ], + "spans": [ + { + "bbox": [ + 164, + 79, + 209, + 113 + ], + "type": "image", + "image_path": "487489ae1c1cc58b30c1bf03f7b482e23ff422fab2c58178909c5e2e21ee197f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 214, + 79, + 260, + 113 + ], + "blocks": [ + { + "bbox": [ + 214, + 79, + 260, + 113 + ], + "lines": [ + { + "bbox": [ + 214, + 79, + 260, + 113 + ], + "spans": [ + { + "bbox": [ + 214, + 79, + 260, + 113 + ], + "type": "image", + "image_path": "1ec648e71eaf0421911e1ef7520b793de89f5ab05562652eaaa335fc6fc413bc.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 226, + 504, + 250 + ], + "lines": [ + { + "bbox": [ + 104, + 226, + 504, + 250 + ], + 
"spans": [ + { + "bbox": [ + 104, + 226, + 504, + 250 + ], + "type": "text", + "content": "Figure 3: Examples on Video QA task. It can be seen that VideoChat-R1 can not only answer questions correctly but also provide relatively accurate reference time periods (clue)." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 268, + 79, + 312, + 113 + ], + "blocks": [ + { + "bbox": [ + 268, + 79, + 312, + 113 + ], + "lines": [ + { + "bbox": [ + 268, + 79, + 312, + 113 + ], + "spans": [ + { + "bbox": [ + 268, + 79, + 312, + 113 + ], + "type": "image", + "image_path": "9ff70423f7048b551160a4d558cb496609c136f669af2e980186a28f8919de18.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 319, + 79, + 365, + 113 + ], + "blocks": [ + { + "bbox": [ + 319, + 79, + 365, + 113 + ], + "lines": [ + { + "bbox": [ + 319, + 79, + 365, + 113 + ], + "spans": [ + { + "bbox": [ + 319, + 79, + 365, + 113 + ], + "type": "image", + "image_path": "4111b6c57103162103af289b7ae203f72e671c4a04ad2c215840b87e546c66ba.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 372, + 79, + 417, + 113 + ], + "blocks": [ + { + "bbox": [ + 372, + 79, + 417, + 113 + ], + "lines": [ + { + "bbox": [ + 372, + 79, + 417, + 113 + ], + "spans": [ + { + "bbox": [ + 372, + 79, + 417, + 113 + ], + "type": "image", + "image_path": "77431af26a587d74d78cf548fe934d2e4ac4879c9f8cd07d8ff92b3a6eaf09ee.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 423, + 79, + 469, + 113 + ], + "blocks": [ + { + "bbox": [ + 423, + 79, + 469, + 113 + ], + "lines": [ + { + "bbox": [ + 423, + 79, + 469, + 113 + ], + "spans": [ + { + "bbox": [ + 423, + 79, + 469, + 113 + ], + "type": "image", + "image_path": 
"348f3183f84c293ceb77e665d8f8e8125828222eb63fc005e492dbd23b5b10b7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 138, + 114, + 385, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 114, + 385, + 125 + ], + "spans": [ + { + "bbox": [ + 138, + 114, + 385, + 125 + ], + "type": "text", + "content": "Question: Why did the girl suddenly jump up from where she was standing?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 138, + 130, + 231, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 130, + 231, + 139 + ], + "spans": [ + { + "bbox": [ + 138, + 130, + 231, + 139 + ], + "type": "text", + "content": "Ground Truth: Excited." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 269, + 129, + 313, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 129, + 313, + 139 + ], + "spans": [ + { + "bbox": [ + 269, + 129, + 313, + 139 + ], + "type": "text", + "content": "[20.9s, 25.0s]" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 379, + 129, + 412, + 138 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 379, + 129, + 412, + 138 + ], + "spans": [ + { + "bbox": [ + 379, + 129, + 412, + 138 + ], + "type": "text", + "content": "E--->" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 139, + 145, + 227, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 145, + 227, + 154 + ], + "spans": [ + { + "bbox": [ + 139, + 145, + 227, + 154 + ], + "type": "text", + "content": "SFT: Dance." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 269, + 145, + 313, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 145, + 313, + 154 + ], + "spans": [ + { + "bbox": [ + 269, + 145, + 313, + 154 + ], + "type": "text", + "content": "[19.4s, 37.2s]" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 377, + 145, + 410, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 377, + 145, + 410, + 154 + ], + "spans": [ + { + "bbox": [ + 377, + 145, + 410, + 154 + ], + "type": "text", + "content": "k--" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 424, + 145, + 444, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 424, + 145, + 444, + 154 + ], + "spans": [ + { + "bbox": [ + 424, + 145, + 444, + 154 + ], + "type": "text", + "content": "---→" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 163, + 231, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 163, + 231, + 172 + ], + "spans": [ + { + "bbox": [ + 138, + 163, + 231, + 172 + ], + "type": "text", + "content": "VideoChat-R1: Excited." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 269, + 163, + 313, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 269, + 163, + 313, + 171 + ], + "spans": [ + { + "bbox": [ + 269, + 163, + 313, + 171 + ], + "type": "text", + "content": "[20.1s, 28.0s]" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 378, + 163, + 421, + 171 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 378, + 163, + 421, + 171 + ], + "spans": [ + { + "bbox": [ + 378, + 163, + 421, + 171 + ], + "type": "text", + "content": "<- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 138, + 173, + 465, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 173, + 465, + 201 + ], + "spans": [ + { + "bbox": [ + 138, + 173, + 465, + 201 + ], + "type": "text", + "content": " The girl jumps up from her position after playing with the rocket toy and then walks around the room holding it. This suggests that she is excited about the toy and wants to show it off. The jump could be interpreted as an expression of excitement. 
" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 138, + 202, + 302, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 202, + 302, + 212 + ], + "spans": [ + { + "bbox": [ + 138, + 202, + 302, + 212 + ], + "type": "text", + "content": "E[(20.1,28.0)]" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 270, + 212, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 212, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 212, + 282 + ], + "type": "text", + "content": "4.3 Qualitative Results" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 290, + 504, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 290, + 504, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 290, + 504, + 368 + ], + "type": "text", + "content": "As shown in Figure 2 and 3, we provide visualizations of VideoChat-R1's outputs for the temporal grounding and video QA tasks. We observe that due to the enhanced spatiotemporal perception capability, VideoChat-R1 can more accurately locate video segments relevant to the question and conduct reasonable reasoning to answer the question. However, compared with the complex chains of thought involved in solving mathematical problems or coding tasks in text and image domains, the chains of thought generated in our current video tasks are relatively simple. We will explore more complex video reasoning tasks in future research." 
+ } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 383, + 188, + 395 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 383, + 188, + 395 + ], + "spans": [ + { + "bbox": [ + 105, + 383, + 188, + 395 + ], + "type": "text", + "content": "5 Conclusions" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "spans": [ + { + "bbox": [ + 104, + 407, + 506, + 485 + ], + "type": "text", + "content": "In this work, we systematically investigate the role of reinforcement fine-tuning (RFT) with Group Relative Policy Optimization (GRPO) in enhancing video-centric multimodal large language models (MLLMs). Our experiments demonstrate that RFT is a highly data-efficient paradigm for task-specific improvements, enabling VideoChat-R1—a model trained with limited samples via multi-task RFT—to achieve state-of-the-art performance on spatio-temporal perception tasks while preserving general chat capabilities and exhibiting emergent spatiotemporal reasoning. We believe our work can present relevant insights for future research efforts in reinforcement learning of video MLLMs." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 89, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 111, + 89, + 505, + 111 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 89, + 505, + 111 + ], + "spans": [ + { + "bbox": [ + 111, + 89, + 505, + 111 + ], + "type": "text", + "content": "[1] Shuai Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Sibo Song, Kai Dang, Peng Wang, Shijie Wang, Jun Tang, et al. Qwen2. 5-vl technical report. arXiv preprint arXiv:2502.13923, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 118, + 505, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 118, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 111, + 118, + 505, + 138 + ], + "type": "text", + "content": "[2] Zhuo Cao, Bingqing Zhang, Heming Du, Xin Yu, Xue Li, and Sen Wang. Flashvtg: Feature layering and adaptive score handling network for video temporal grounding. arXiv preprint arXiv:2412.13441, 2024." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 146, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 146, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 111, + 146, + 506, + 176 + ], + "type": "text", + "content": "[3] Huilin Deng, Ding Zou, Rui Ma, Hongchen Luo, Yang Cao, and Yu Kang. Boosting the generalization and reasoning of vision language models with curriculum reinforcement learning. arXiv preprint arXiv:2503.07065, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 184, + 504, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 504, + 215 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 504, + 215 + ], + "type": "text", + "content": "[4] Yihe Deng, Hritik Bansal, Fan Yin, Nanyun Peng, Wei Wang, and Kai-Wei Chang. Openthinker: An early exploration to complex vision-language reasoning via iterative self-improvement. arXiv preprint arXiv:2503.17352, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 223, + 504, + 245 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 223, + 504, + 245 + ], + "spans": [ + { + "bbox": [ + 111, + 223, + 504, + 245 + ], + "type": "text", + "content": "[5] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 251, + 505, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 251, + 505, + 282 + ], + "spans": [ + { + "bbox": [ + 111, + 251, + 505, + 282 + ], + "type": "text", + "content": "[6] Chaoyou Fu, Yuhan Dai, Yondong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. 
arXiv preprint arXiv:2405.21075, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 290, + 504, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 290, + 504, + 312 + ], + "spans": [ + { + "bbox": [ + 111, + 290, + 504, + 312 + ], + "type": "text", + "content": "[7] Jiyang Gao, Chen Sun, Zhenheng Yang, and Ram Nevatia. Tall: Temporal activity localization via language query. In Proceedings of the IEEE international conference on computer vision, pages 5267-5275, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 318, + 504, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 318, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 111, + 318, + 504, + 339 + ], + "type": "text", + "content": "[8] Aleksandr Gordeev, Vladimir Dokholyan, Irina Tolstykh, and Maksim Kuprashevich. Saliency-guided detr for moment retrieval and highlight detection. arXiv preprint arXiv:2410.01615, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 346, + 504, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 346, + 504, + 377 + ], + "spans": [ + { + "bbox": [ + 111, + 346, + 504, + 377 + ], + "type": "text", + "content": "[9] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 384, + 504, + 415 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 384, + 504, + 415 + ], + "spans": [ + { + "bbox": [ + 106, + 384, + 504, + 415 + ], + "type": "text", + "content": "[10] Lianghua Huang, Xin Zhao, and Kaiqi Huang. Got-10k: A large high-diversity benchmark for generic object tracking in the wild. 
IEEE transactions on pattern analysis and machine intelligence, 43(5): 1562–1577, 2019." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 423, + 504, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 423, + 504, + 454 + ], + "spans": [ + { + "bbox": [ + 106, + 423, + 504, + 454 + ], + "type": "text", + "content": "[11] Zhenpeng Huang, Xinhao Li, Jiaqi Li, Jing Wang, Xiangyu Zeng, Cheng Liang, Tao Wu, Xi Chen, Liang Li, and Limin Wang. Online video understanding: A comprehensive benchmark and memory-augmented method. arXiv preprint arXiv:2501.00584, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 461, + 504, + 492 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 461, + 504, + 492 + ], + "spans": [ + { + "bbox": [ + 106, + 461, + 504, + 492 + ], + "type": "text", + "content": "[12] Aaron Jaech, Adam Kalai, Adam Lerer, Adam Richardson, Ahmed El-Kishky, Aiden Low, Alec Helyar, Aleksander Madry, Alex Beutel, Alex Carney, et al. Openai o1 system card. arXiv preprint arXiv:2412.16720, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 500, + 504, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 500, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 106, + 500, + 504, + 521 + ], + "type": "text", + "content": "[13] Ranjay Krishna, Kenji Hata, Frederic Ren, Li Fei-Fei, and Juan Carlos Niebles. Dense-captioning events in videos. In Proceedings of the IEEE international conference on computer vision, pages 706–715, 2017." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 529, + 504, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 529, + 504, + 550 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 504, + 550 + ], + "type": "text", + "content": "[14] KunChang Li, Yinan He, Yi Wang, Yizhuo Li, Wenhai Wang, Ping Luo, Yali Wang, Limin Wang, and Yu Qiao. 
Videochat: Chat-centric video understanding. arXiv preprint arXiv:2305.06355, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 557, + 506, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 557, + 506, + 588 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 506, + 588 + ], + "type": "text", + "content": "[15] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mybench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 596, + 504, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 596, + 504, + 625 + ], + "spans": [ + { + "bbox": [ + 106, + 596, + 504, + 625 + ], + "type": "text", + "content": "[16] Xinhao Li, Zhenpeng Huang, Jing Wang, Kunchang Li, and Limin Wang. Videoeval: Comprehensive benchmark suite for low-cost evaluation of video foundation model. arXiv preprint arXiv:2407.06491, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 634, + 504, + 665 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 634, + 504, + 665 + ], + "spans": [ + { + "bbox": [ + 106, + 634, + 504, + 665 + ], + "type": "text", + "content": "[17] Xinhao Li, Yi Wang, Jiashuo Yu, Xiangyu Zeng, Yuhan Zhu, Haian Huang, Jianfei Gao, Kunchang Li, Yinan He, Chenting Wang, et al. Videochat-flash: Hierarchical compression for long-context video modeling. arXiv preprint arXiv:2501.00574, 2024." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 106, + 672, + 504, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 672, + 504, + 693 + ], + "spans": [ + { + "bbox": [ + 106, + 672, + 504, + 693 + ], + "type": "text", + "content": "[18] Yuqi Liu, Bohao Peng, Zhisheng Zhong, Zihao Yue, Fanbin Lu, Bei Yu, and Jiaya Jia. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. arXiv preprint arXiv:2503.06520, 2025." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 106, + 700, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 700, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 700, + 504, + 722 + ], + "type": "text", + "content": "[19] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 103 + ], + "type": "text", + "content": "[20] Muhammad Maaz, Hanoona Rasheed, Salman Khan, and Fahad Shahbaz Khan. Video-chatgpt: Towards detailed video understanding via large vision and language models. arXiv preprint arXiv:2306.05424, 2023." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 506, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 506, + 150 + ], + "type": "text", + "content": "[21] Viorica Patraucean, Lucas Smaira, Ankush Gupta, Adria Recasens, Larisa Markeeva, Dylan Banarse, Skanda Koppula, Mateusz Malinowski, Yi Yang, Carl Doersch, et al. Perception test: A diagnostic benchmark for multimodal video models. Advances in Neural Information Processing Systems, 36: 42748-42761, 2023." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 157, + 504, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 157, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 106, + 157, + 504, + 189 + ], + "type": "text", + "content": "[22] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based r1. arXiv preprint arXiv:2503.07536, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 194, + 504, + 226 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 194, + 504, + 226 + ], + "spans": [ + { + "bbox": [ + 106, + 194, + 504, + 226 + ], + "type": "text", + "content": "[23] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 232, + 505, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 232, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 232, + 505, + 264 + ], + "type": "text", + "content": "[24] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 270, + 504, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 270, + 504, + 292 + ], + "spans": [ + { + "bbox": [ + 106, + 270, + 504, + 292 + ], + "type": "text", + "content": "[25] Jiawei Wang, Liping Yuan, Yuchen Zhang, and Haomiao Sun. Tarsier: Recipes for training and evaluating large video description models. arXiv preprint arXiv:2407.00634, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 298, + 504, + 329 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 298, + 504, + 329 + ], + "spans": [ + { + "bbox": [ + 107, + 298, + 504, + 329 + ], + "type": "text", + "content": "[26] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 335, + 504, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 335, + 504, + 366 + ], + "spans": [ + { + "bbox": [ + 106, + 335, + 504, + 366 + ], + "type": "text", + "content": "[27] Ye Wang, Boshen Xu, Zihao Yue, Zihan Xiao, Ziheng Wang, Liang Zhang, Dingyi Yang, Wenxuan Wang, and Qin Jin. Timezero: Temporal video grounding with reasoning-guided lvm. arXiv preprint arXiv:2503.13377, 2025." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 373, + 505, + 405 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 373, + 505, + 405 + ], + "spans": [ + { + "bbox": [ + 106, + 373, + 505, + 405 + ], + "type": "text", + "content": "[28] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Guo Chen, Baoqi Pei, Rongkun Zheng, Zun Wang, Yansong Shi, et al. Internvideo2: Scaling foundation models for multimodal video understanding. In European Conference on Computer Vision, pages 396-416. Springer, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 411, + 505, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 411, + 505, + 442 + ], + "spans": [ + { + "bbox": [ + 106, + 411, + 505, + 442 + ], + "type": "text", + "content": "[29] Yi Wang, Xinhao Li, Ziang Yan, Yinan He, Jiashuo Yu, Xiangyu Zeng, Chenting Wang, Changlian Ma, Haian Huang, Jianfei Gao, et al. Internvideo2.5: Empowering video mllms with long and rich context modeling. arXiv preprint arXiv:2501.12386, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 448, + 505, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 448, + 505, + 479 + ], + "spans": [ + { + "bbox": [ + 106, + 448, + 505, + 479 + ], + "type": "text", + "content": "[30] Haoning Wu, Dongxu Li, Bei Chen, and Junnan Li. Longvideobench: A benchmark for long-context interleaved video-language understanding. Advances in Neural Information Processing Systems, 37: 28828-28857, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 485, + 504, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 485, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 106, + 485, + 504, + 517 + ], + "type": "text", + "content": "[31] Junbin Xiao, Angela Yao, Yicong Li, and Tat-Seng Chua. Can i trust your answer? visually grounded video question answering. 
In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13204-13214, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 523, + 504, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 523, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 504, + 544 + ], + "type": "text", + "content": "[32] Yifan Xu, Xinhao Li, Yichun Yang, Rui Huang, and Limin Wang. Fine-grained video-text retrieval: A new benchmark and method. arXiv preprint arXiv:2501.00513, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 550, + 505, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 550, + 505, + 582 + ], + "spans": [ + { + "bbox": [ + 106, + 550, + 505, + 582 + ], + "type": "text", + "content": "[33] Ziang Yan, Zhilin Li, Yinan He, Chenting Wang, Kunchang Li, Xinhao Li, Xiangyu Zeng, Zilei Wang, Yali Wang, Yu Qiao, et al. Task preference optimization: Improving multimodal large language models with vision task alignment. arXiv preprint arXiv:2412.19326, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 588, + 504, + 610 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 588, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 106, + 588, + 504, + 610 + ], + "type": "text", + "content": "[34] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 616, + 505, + 647 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 616, + 505, + 647 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 505, + 647 + ], + "type": "text", + "content": "[35] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, et al. 
R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 653, + 505, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 653, + 505, + 685 + ], + "spans": [ + { + "bbox": [ + 106, + 653, + 505, + 685 + ], + "type": "text", + "content": "[36] En Yu, Liang Zhao, Yana Wei, Jinrong Yang, Dongming Wu, Lingyu Kong, Haoran Wei, Tiancai Wang, Zheng Ge, Xiangyu Zhang, et al. Merlin: Empowering multimodal llms with foresight minds. In European Conference on Computer Vision, pages 425-443. Springer, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 691, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 691, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 691, + 505, + 723 + ], + "type": "text", + "content": "[37] Xiangyu Zeng, Kunchang Li, Chenting Wang, Xinhao Li, Tianxiang Jiang, Ziang Yan, Songze Li, Yansong Shi, Zhengrong Yue, Yi Wang, et al. Timesuite: Improving mllms for long video understanding via grounded tuning. arXiv preprint arXiv:2410.19702, 2024." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 264 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "text", + "content": "[38] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "type": "text", + "content": "[39] Yufei Zhan, Yousong Zhu, Shurong Zheng, Hongyin Zhao, Fan Yang, Ming Tang, and Jinqiao Wang. Vision-r1: Evolving human-free alignment in large vision-language models via vision-guided reinforcement learning. arXiv preprint arXiv:2503.18013, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 148, + 505, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 148, + 505, + 179 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 505, + 179 + ], + "type": "text", + "content": "[40] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. 
R1-vl: Learning to reason with multimodal large language models via step-wise group relative policy optimization. arXiv preprint arXiv:2503.12937, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 186, + 505, + 208 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 505, + 208 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 505, + 208 + ], + "type": "text", + "content": "[41] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 213, + 505, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 213, + 505, + 236 + ], + "spans": [ + { + "bbox": [ + 106, + 213, + 505, + 236 + ], + "type": "text", + "content": "[42] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 241, + 505, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 241, + 505, + 264 + ], + "spans": [ + { + "bbox": [ + 106, + 241, + 505, + 264 + ], + "type": "text", + "content": "[43] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025." 
+ } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_content_list.json b/data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3a8a6abe9499650c7e3d81bc30e890e65445fa97 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_content_list.json @@ -0,0 +1,3355 @@ +[ + { + "type": "text", + "text": "To Backtrack or Not to Backtrack: When Sequential Search Limits Model Reasoning", + "text_level": 1, + "bbox": [ + 169, + 99, + 710, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Tian Qin*", + "bbox": [ + 181, + 172, + 261, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Harvard University", + "bbox": [ + 181, + 186, + 328, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "David Alvarez-Melis", + "bbox": [ + 486, + 171, + 655, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Harvard University, Kempner Institute, MSR", + "bbox": [ + 486, + 186, + 813, + 202 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Samy Jelassi†", + "bbox": [ + 181, + 220, + 287, + 237 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Harvard University, Kempner Institute", + "bbox": [ + 181, + 237, + 467, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Eran Malach‡†", + "bbox": [ + 506, + 220, + 616, + 234 + ], + "page_idx": 0 
+ }, + { + "type": "text", + "text": "Harvard University, Kempner Institute", + "bbox": [ + 506, + 236, + 790, + 252 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 286, + 539, + 301 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advancements in large language models (LLMs) have significantly improved their reasoning abilities, particularly through techniques involving search and backtracking. Backtracking naturally scales test-time compute by enabling sequential, linearized exploration via long chain-of-thought (CoT) generation. However, this is not the only strategy for scaling test time-compute: parallel sampling with best-of- $n$ selection provides an alternative that generates diverse solutions simultaneously. Despite the growing adoption of sequential search, its advantages over parallel sampling—especially under a fixed compute budget—remain poorly understood. In this paper, we systematically compare these two approaches on two challenging reasoning tasks: CountDown and Sudoku. Surprisingly, we find that sequential search underperforms parallel sampling on CountDown but outperforms it on Sudoku, suggesting that backtracking is not universally beneficial. We identify two factors that can cause backtracking to degrade performance: (1) training on fixed search traces can lock models into suboptimal strategies, and (2) explicit CoT supervision can discourage 'implicit' (non verbalized) reasoning. Extending our analysis to reinforcement learning (RL), we show that models with backtracking capabilities benefit significantly from RL fine-tuning, while models without backtracking see limited, mixed gains. 
Together, these findings challenge the assumption that backtracking universally enhances LLM reasoning, instead revealing a complex interaction between task structure, training data, model scale, and learning paradigm.", + "bbox": [ + 228, + 316, + 769, + 638 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 655, + 320, + 671 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent studies (Kumar et al., 2024; Havrilla et al., 2024) propose teaching LLMs to correct mistakes through backtracking, enabling exploration of alternative solutions. Despite growing popularity (DeepSeek-AI et al., 2025; Muennighoff et al., 2025), it remains unclear whether correcting errors post-hoc via backtracking is ultimately more compute-efficient at test time than directly learning the correct solution. Solving strategic games such as CountDown and Sudo require extensive exploration of different solution paths, making them ideal for analyzing the computational trade-offs of sequential versus parallel search. In this work, we use these two games to conduct a controlled investigation to determine whether backtracking is an effective way to scale test-time compute.", + "bbox": [ + 169, + 678, + 826, + 806 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "There are two primary strategies to scale LLMs' test-time compute: sequential autoregressive search (explicit backtracking within a chain-of-thought) and parallel sampling (generating multiple independent solutions and selecting the best with best-of- $n$ ). 
While sequential search allows the model to refine reasoning by learning from past mistakes, it comes at a cost: due to the attention mechanism, the FLOPs required to generate CoT grow quadratically", + "bbox": [ + 169, + 815, + 826, + 888 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Correspondence to tqin@g.harvard.edu ‡ Equal senior contributions.", + "bbox": [ + 189, + 895, + 650, + 910 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Currently at Apple.", + "bbox": [ + 191, + 910, + 328, + 924 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07052v2 [cs.LG] 3 Oct 2025", + "bbox": [ + 22, + 284, + 58, + 715 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/4845c8f0e654443d5b277561c128b32ab3ce49a0eb29172ae399b108b2328f01.jpg", + "image_caption": [ + "Figure 1: Backtracking performance varies significantly with task type and the application of post-training reinforcement learning. (A) Training backtracking and direct solution models on CountDown and Selenium reveals task-dependent performance: under equal test-time compute, backtracking (sequential search) underperforms direct solution with best-of- $n$ generation (parallel search) on CountDown, but outperforms it on Selenium. (B) Fine-tuning with GRPO consistently improves backtracking model performance across compute budgets, but has mixed effects on the direct solution model." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 103, + 612, + 239 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/f9c05a7eabbf3900cda3687f00654531d5b068bf60ac7e40b2f0d44c39add68a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 614, + 104, + 823, + 239 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "with sequence length. Even when generating the same number of tokens, sequential search incurs more FLOPs than parallel sampling. To compare these two strategies, we train (i) backtracking models that learn from explicit search traces and use sequential search to solve hard problems, and (ii) direct solution (i.e., no backtracking) models that learn solely from correct solutions, using parallel search at test time. Equating test-time compute, we observe contrasting results (Fig. 1 A): in CountDown, the backtracking model consistently underperforms, whereas in Sudo, it consistently outperforms the direct solution model.", + "bbox": [ + 169, + 378, + 826, + 479 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Through controlled experiments, we identify two reasons teaching backtracking can inadvertently degrade performance. First, explicit backtracking reasoning traces bias models toward prescribed search strategies, limiting exploration of potentially superior alternatives. In CountDown, the backtracking model closely mimics training search paths, while the direct solution model independently discovers more efficient strategies (Section 4.2). Second, detailed backtracking traces encourage morbidity (producing lengthy yet ineffective reasoning chains), while discouraging internal \"thinking\" (implicit reasoning without outputting CoT, Section 4.3). Beyond these factors, we demonstrate that model size and task-specific characteristics also impact the effectiveness of backtracking (Section 5.1). 
Crucially, we show that our contrastive observation between Sudoku and Countdown generalizes to real-world tasks: such as math and science problem solving. We show that backtracking is not always the most effective way to scale test-time compute (Appendix A) for general reasoning models.", + "bbox": [ + 169, + 489, + 828, + 672 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Extending beyond supervised learning, we evaluate reinforcement learning (RL) with Group Relative Policy Optimization (GRPO) (Shao et al., 2024), uncovering novel interactions between backtracking capabilities and RL. We show that the backtracking model discovers new, effective search strategies through RL, achieving substantial performance improvements. Conversely, the direct solution model improves one-shot accuracy but loses effectiveness in parallel search, revealing a clear trade-off (Fig. 1 B). This finding shifts our understanding of how backtracking influences a model's potential to improve under RL, highlighting the unique advantage of teaching backtracking for long-term reasoning capabilities.", + "bbox": [ + 169, + 683, + 826, + 797 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our controlled study on two strategic games provides a nuanced understanding of when backtracking effectively scales test-time compute. Our main contributions are:", + "bbox": [ + 169, + 808, + 823, + 838 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We use CountDown and Selenium as controlled testbeds to examine whether backtracking enables efficient test-time scaling. Under a fixed compute budget, backtracking outperforms parallel search in Selenium but underperforms in CountDown (Fig. 
1 A).", + "- We identify two key factors affecting backtracking efficacy: (1) Prescribed search bias: Training on detailed backtracking traces can unintentionally constrain models to subopti" + ], + "bbox": [ + 174, + 851, + 825, + 925 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/16a0114708d8b92c3f2baea7ffa0d6e48f30371b94caec65de5a259c8b2f430a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 103, + 488, + 306 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/02b12a0d63aecd5659c4e656b5c8285ea2c85ddb288feb1efb1f169ec1e01426.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 516, + 103, + 790, + 281 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/93f7d7867694af716476750ed208a1f45b43f84d6a6f61f3e5d59f57fa4913ff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 222, + 309, + 764, + 364 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/812d28a296faed703ecac1e5215249a835aa2ce1668ed109dca4556d34e7a424.jpg", + "image_caption": [ + "Direct solution model performs parallel search through temperature sampling:", + "Figure 2: Backtracking and direct solution for two different strategic games. Panel $(a, b)$ : Example the search tree for CountDown and Sudoku. Solving both games require extensive search in the solution space. Panel $(c)$ : The backtracking model is trained on the search traces generated by a Depth-First-Search (DFS) algorithm. At test time, the model performs sequential search. The direct solution model is trained on the correct solution only. At test time, the model performs parallel search through temperature sampling and takes best-of- $n$ ." 
+ ], + "image_footnote": [], + "bbox": [ + 284, + 407, + 624, + 489 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "mal search strategies. (2) Excessive morbidity: Explicit backtracking traces encourage models to produce lengthy reasoning chains without improving reasoning ability.", + "bbox": [ + 189, + 614, + 823, + 646 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "- We demonstrate that reinforcement learning (GRPO) consistently enhances backtracking models by enabling discovery of novel solutions, whereas direct solution models experience mixed outcomes (Fig. 1 B).", + "bbox": [ + 176, + 646, + 825, + 689 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 702, + 328, + 718 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "See Appendix B for an extensive review on related work.", + "bbox": [ + 169, + 726, + 584, + 741 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Scaling test-time compute. Prior work has explored scaling language model performance at test time through parallel or sequential search strategies. Parallel methods rely on independent sampling and selection via heuristics or reward models (Brown et al., 2024; Irvine et al., 2023; Levi, 2024; Xin et al., 2024), while sequential methods refine reasoning step by step using earlier outputs (Hou et al., 2025; Lee et al., 2025). Tree-based methods such as MCTS bridge the two and often incorporate process-level reward models to guide reasoning (Wu et al., 2024; Lightman et al., 2023). Our work contributes to this area by comparing sequential (backtracking) and parallel search under fixed compute budgets.", + "bbox": [ + 169, + 748, + 826, + 862 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Self-correction and backtracking. 
Language models can be trained to self-correct through fine-tuning on revision data, synthetic augmentations, or reward-based learning (Saunders et al., 2022; Qu et al., 2024; Welleck et al., 2022). Some approaches also introduce explicit search or separate correction modules to guide revision (Yao et al., 2023b; Havrilla et al.,", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2024). We build on this line of work by studying backtracking as an implicit form of self-correction, analyzing when learning to backtrack helps or hinders reasoning.", + "bbox": [ + 169, + 102, + 826, + 133 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Reinforcement learning for LLM reasoning. Reinforcement learning has shown promise in enabling language models to autonomously discover reasoning strategies, including through simplified algorithms like GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025). While prior work has demonstrated strong results, it remains unclear which model properties enable successful RL-based reasoning (Zelikman et al., 2022; Kazemnejad et al., 2024). 
Our study addresses this gap by comparing how backtracking and no backtracking models respond to RL fine-tuning, revealing asymmetric benefits.", + "bbox": [ + 169, + 146, + 826, + 247 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Two strategic games: CountDown and Sudoku", + "text_level": 1, + "bbox": [ + 169, + 263, + 622, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 CountDown", + "text_level": 1, + "bbox": [ + 171, + 291, + 307, + 305 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.1 Game setup", + "text_level": 1, + "bbox": [ + 171, + 311, + 312, + 328 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The Game of CountDown has been frequently used as a testbed to study and evaluate LLM reasoning (Gandhi et al., 2024; 2025; Yao et al., 2023a). In a CountDown game, the player is given a set of candidate numbers and a target number (restricted to integers). The goal is to reach the target by applying a sequence of arithmetic operations—addition, subtraction, multiplication, or division—using the candidate numbers. Each number must be used exactly once, and intermediate results can be reused in subsequent operations.", + "bbox": [ + 169, + 330, + 826, + 417 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To algorithmically solve CountDown, we can represent the problem as a search tree (Fig. 2a). Each node in the search tree corresponds to a state defined by the current set of available numbers. At each step, the algorithm selects a pair of numbers from the set and applies one of the four operations, replacing the pair with the resulting value to create a new state. This process continues recursively until the target number is reached (correct leaf node) or all combinations are exhausted (wrong leaf node). 
In this work, we play the CountDown with four candidate numbers, and for each game, there are 1,152 possible search paths.", + "bbox": [ + 169, + 429, + 826, + 530 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.2 Data generation", + "text_level": 1, + "bbox": [ + 171, + 542, + 344, + 558 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We generate backtracking traces with Depth First Search (DFS) with a sum-heuristic (Gandhi et al. (2024), further details in Appendix C.1). We generate a dataset of 500,000 CountDown questions, and the DFS search correctly solves $57\\%$ of the questions. The backtracking trace is a serialized version of DFS, listing all the tree nodes visited in the order of DFS traversal. To construct the direct solution training data, we prune the backtracking traces to keep only the correct solution path. With the pruning approach, we remove the exploratory parts of the trace while preserving the answer format and scaffolding used in the backtracking model, to ensure a fair comparison. We also ensure that the direct solution model does not see more solved CountDown games, we include only the 285,000 questions (i.e., $500,000 \\times 0.57$ ) that DFS successfully solves. We provide examples of both training data in Appendix H.", + "bbox": [ + 169, + 561, + 828, + 703 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Suku", + "text_level": 1, + "bbox": [ + 171, + 715, + 272, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2.1 Game setup", + "text_level": 1, + "bbox": [ + 171, + 737, + 312, + 753 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Sudoku is another prototypical strategic game used to study reasoning and search in LLMs (Yao et al., 2023a; Long, 2023). In this work, we focus on hard $9 \\times 9$ Sudo boards, where only about 20 of the 81 cells are pre-filled, making the search space substantially larger (see Appendix C.1 for a description of Sudo rules). 
To algorithmically solve Sudoku, we represent the problem as a search tree (Fig. 2b). Each node corresponds to a partial board state, where some cells have been filled. At each step, the algorithm selects an unfilled cell and fills it with a candidate digit that satisfies Sudoku constraints in the current state.
+ ], + "image_footnote": [], + "bbox": [ + 174, + 99, + 385, + 220 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/e70f23b00318eb73998084c7cc03a262857c0b2c31e8da7e040b4feba0f83084.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 395, + 99, + 604, + 220 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/6c35e4c35a8fbc4b9bb884248ec42755727f5cb1a53b8128d768384367b79683.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 101, + 821, + 220 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2.2 Data generation", + "text_level": 1, + "bbox": [ + 171, + 382, + 344, + 397 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We follow the same procedure as CountDown to generate training data for both the backtracking and direct solution models. We use a DFS-based search algorithm, in combination with a Sudoku solver that applies seven common human strategies (e.g., naked singles, hidden pairs and etc, Papadimas. (2023)) to eliminate candidates for unfilled cells. At each node, we use the 7 strategies to eliminate candidates for unfilled cells, and then DFS chooses an unfilled cell, makes a guess and continues solving recursively. This process continues until the board is either solved or reaches a dead-end (i.e., an invalid state with no legal moves). We use a dataset of 3M Sudoku puzzles from (Radcliffe, 2020), and the combined DFS-solver approach successfully solves $98\\%$ of them. Since DFS successfully solves nearly all puzzles, we train both models on 2.8M examples and reserve the last 200K for validation and testing. 
We provide further details on Sudoku training data generation in Appendix C.1 and data examples in Appendix H.", + "bbox": [ + 169, + 398, + 826, + 566 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Model and training", + "text_level": 1, + "bbox": [ + 171, + 575, + 359, + 590 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We use Qwen2.5-style model architectures (Yang et al., 2024) with RoPE positional encoding (Su et al., 2021) and Group Query Attention (GQA) (Ainslie et al., 2023). To maximize parameter efficiency, we design custom tokenizers for both games, significantly reducing the size of the language modeling head. This allows us to train smaller models than prior work (Gandhi et al., 2024; Shah et al., 2024) while maintaining comparable performance on both tasks. For CountDown, we use a 17M parameter model with a context length of 4096 tokens; for Selenium, we use a 38M model with the same context length. See Appendix C.3 for model architecture and an exhaustive list of training hyperparameters. We train all models until validation loss converges (see Appendix G.3).", + "bbox": [ + 169, + 593, + 826, + 720 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Empirical trade-offs of backtracking", + "text_level": 1, + "bbox": [ + 169, + 732, + 529, + 750 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first demonstrate that backtracking models do not universally outperform the direct solution models (Section 4.1) because backtracking models are restricted to learn a prescribed way of search (Section 4.2). We then identify two factors (Sections 4.3) showing how we might improve test-time scaling for backtracking models.", + "bbox": [ + 169, + 755, + 823, + 813 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Backtracking is not always beneficial", + "text_level": 1, + "bbox": [ + 169, + 821, + 488, + 835 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Evaluation metrics. 
We evaluate model performances using solving accuracy on 200 unseen problems with binary scores (either correct or incorrect, no partial credits, see appendix C.1). We use FLOPs to compare inference costs (see Appendix D for FLOPs computation). For the backtracking model, we allow models to autoregressively generate and measure how many problems the model finds the correct solution at various CoT lengths (ranging from 1024 to 4096 tokens). For the direct solution model, we generate", + "bbox": [ + 169, + 839, + 825, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "$n$ solutions in parallel through temperature sampling at $T = 0.7$ , and examine whether the model has found the correct solution within $n$ attempts (i.e., best-of- $n$ ). Best-of- $n$ is a suitable choice in those two games, a case where solving the task is hard but verification is trivial. In general, our analysis applies to tasks where verification can be easily done with an external verifier at test-time. This is definitely not always the case, and we leave the study of problems where test-time verification is not as easy to future work. In those tasks, one might need to consider majority voting or other strategies. See Appendix E for further discussions.", + "bbox": [ + 169, + 103, + 826, + 214 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Results. In Fig. 1 A, we observe distinct scaling behaviors for the two models. For both games, the direct solution model's test accuracy scales linearly with increased test-time compute (measured on a logarithmic scale). This scaling behavior indicates that through parallel sampling, the backtracking model generates diverse solutions that search through different solution paths. 
Conversely, the backtracking model exhibits sub-linear scaling: Longer solution traces disproportionately yield smaller accuracy improvements. We attribute the sub-linear scaling to two causes. First, as reasoning chains become longer, the backtracking model might struggle to effectively track visited states and efficiently search through the solution space. Second, when models perform sequential search, the computation cost grows quadratically with CoT length (due to the attention mechanism, see Appendix D), and this further makes backtracking model less effective for scaling up test time compute. Overall, for CountDown, the direct solution model consistently outperforms its backtracking counterpart. However, this trend is reversed in Selenium, where the backtracking model consistently achieves higher accuracy.", + "bbox": [ + 169, + 222, + 826, + 419 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Backtracking model learns both the good and the bad", + "text_level": 1, + "bbox": [ + 169, + 428, + 609, + 441 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "When teaching a child to correct math mistakes, the child understands that the goal is the correct answer—not making and then fixing errors. Humans have meta-cognitive awareness that models lack. Models trained via next-token prediction simply imitate the traces they see, including making the mistake before fixing it. In CountDown, this poses a key limitation: the backtracking model learns to follow the specific search paths seen in training. While some tasks—like shortest path finding—have optimal strategies we can supervise directly (e.g., Dijkstra's algorithm), most reasoning tasks, including CountDown, lack such guarantees. As a result, the model may be constrained by the inefficiencies in the backtracking data. In contrast, the direct solution model, trained only on correct answers, is free to discover more efficient strategies. 
In our subsequent analysis, we concretely show how the direct solution model successfully bypasses many inefficient search and backtracking steps learned by the backtracking model.", + "bbox": [ + 169, + 446, + 826, + 616 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2.1 Backtracking model finds the solution with fewer mistakes", + "text_level": 1, + "bbox": [ + 169, + 621, + 653, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Measuring number of mistakes. We compare the number of mistakes made by: (1) DFS (used to generate backtracking data), (2) the backtracking model, and (3) the direct solution model. For DFS and the backtracking model, mistakes are counted as the number of incorrect terminal nodes explored before finding the correct solution. For the direct solution model, mistakes correspond to how many parallel samples ( $n$ in best-of- $n$ ) are needed.", + "bbox": [ + 169, + 637, + 823, + 710 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Comparing search strategies. We sort the 200 test problems based on mistakes made by DFS and plot mistakes for both models. Fig. 3 left compares DFS search and backtracking model. The number mistakes made by the backtracking model is correlated with the DFS backtracking trace. This observation is not surprising given that the backtracking model is trained on these traces. However, this result is interesting when we compare it against the direct solution model (Fig. 3 middle). The direct solution model solves most problems within fewer than 10 attempts—far fewer compared to DFS or the backtracking model. Fig. 3 right quantifies these observations. Specifically, for a fixed mistake budget, we use Jaccard Index to measure whether the model solves a similar set of problems as DFS solves. The backtracking model closely mirrors DFS search (high set similarity), whereas the direct solution model diverges significantly (low set similarity). 
Together with superior performance of the direct solution model, we conclude that the direct solution model learns more efficient search strategies, avoiding unnecessary explorations of wrong paths.", + "bbox": [ + 169, + 718, + 826, + 901 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "1 Mistakes are counted only for problems solved correctly by the model.", + "bbox": [ + 189, + 909, + 658, + 924 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/f744918c025c250a03c698c53abc9f6b439ada25c09467401c31889e902dd09a.jpg", + "image_caption": [ + "Figure 4: Two different variations to improve backtracking model. (a). We hypothesize that the backtracking model can think one step ahead without sacrificing its ability to search. Therefore, we shorten the search trace by skipping the last search step. (b). Two data variations that improve the backtracking model. Mixed-backtrack model trained on a diverse set of search strategies. Think-backtracking model trained on shortened DFS trace." + ], + "image_footnote": [], + "bbox": [ + 178, + 104, + 537, + 308 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/64eea15ad12a2ff0fc19f7e2b733fda8065aa921d4af04dc08e790c09f82a37d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 107, + 826, + 305 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Two ways to improve backtracking model", + "text_level": 1, + "bbox": [ + 169, + 425, + 526, + 441 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Training on diverse set of search strategies. Our analysis suggests a clear direction for improving the backtracking model: using better search strategies to improve backtracking traces. 
Beyond DFS, we explored alternatives including Breadth-First Search (BFS) and various heuristic methods (see Appendix G.1). Despite these efforts, no single search strategy significantly outperformed DFS. Inspired by Gandhi et al. (2024), we trained a variant of the backtracking model—mix-backtrack model—using a diverse mixture of BFS and DFS strategies (32 in total), aiming to help the model discover more optimal search patterns.", + "bbox": [ + 169, + 446, + 826, + 559 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Backtracking model thinks less and talks more. Apart from learning suboptimal search strategies, another inefficiency in the backtracking model is caused by the model learns to be excessively verbose. Specifically, by requiring the model to explicitly output every step of the DFS, we may prevent it from internalizing part of the reasoning process. Concretely, we hypothesize that for CountDown, the model can internally plan at least one step ahead, allowing it to shorten its explicit reasoning trace without losing its ability to perform DFS. To test hypothesis, we train a variation—the think-backtrack model—on shortened DFS traces, skipping one intermediate step (Fig. 4, A).", + "bbox": [ + 169, + 571, + 828, + 686 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Mix-strategy results. Fig. 4 (B) compares this mixed-strategy model against the original backtracking and direct solution models. We also include a training data upper bound, representing perfect execution of the mixed search strategies. The mixed-strategy model improves over the original backtracking model and closely approaches its training-data upper bound. However, even with deliberate attempts to optimize search strategies, surpassing the performance of the direct solution model remains challenging. 
This experiment underscores the inherent difficulty in identifying superior handcrafted search traces.", + "bbox": [ + 169, + 699, + 828, + 799 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Think-backtrack results. Fig. 4 (B) also compares the performance of the think-backtrack model. By encouraging the model to internalize parts of the reasoning process, the think-backtrack model achieves performances comparable to the direct solution model. This result suggests that models with backtracking ability might produce long but ineffective CoT. By training the model to avoid making the mistakes at the first place, we reduce model morbidity without sacrificing its search capability, and in turn improving test-time-compute scaling. As an additional evidence, in Appendix G.2, we show that the think-backtrack model solves a superset of test problems solved by the original backtrack model.", + "bbox": [ + 169, + 811, + 828, + 926 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/9b1b98b03adf16268a74f8dec7aaeb6508fe5869505875ad5daa40391ef2fe85.jpg", + "image_caption": [ + "Figure 5: Different scaling behaviors for backtracking versus direct solution model. CountDown (A). Backtracking model performance does not improve as we scale up model size. (B). The direct solution model improves (C). Direct solution model consistently outperforms backtracking model. Sodomu (D, E). Both models' performances improve as we scale up model size. (F). Direct solution model consistently underperforms backtracking model." 
+ ], + "image_footnote": [], + "bbox": [ + 174, + 103, + 823, + 481 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5 Model size and tree depth impact the efficacy of backtracking", + "text_level": 1, + "bbox": [ + 169, + 592, + 756, + 611 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "While we've shown that backtracking might lead to ineffective test-time scaling, other factors also shape its effectiveness. In Section 5.1, we show that backtracking and direct solution models scale differently with model sizes. To explain the contrasting outcomes (Fig. 1 A) between CountDown and Selenium, in Appendix F, we show that task differences—particularly search tree depth—play a key role: deeper tasks like Selenium benefit more from backtracking.", + "bbox": [ + 169, + 617, + 826, + 703 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.1 Dependence on model size", + "text_level": 1, + "bbox": [ + 171, + 713, + 415, + 728 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We now investigate how model size impacts the performance of backtracking and direct solution models. We evaluate four model scales—3M, 17M, 38M, and 144M—by proportionally increasing the number of attention heads, embedding dimensions, and number of attention layers. Detailed model configurations can be found in Appendix C.2.", + "bbox": [ + 169, + 732, + 826, + 790 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "CountDown. Scaling up model size improves the performance of the direct solution model (Fig. 5 B) across all test-time-compute budgets. When trained exclusively on correct solutions, larger models can independently discover highly effective search strategies. In contrast, the backtracking model shows no improvements with increased model sizes (Fig. 5 A). The lack of improvement from model scaling can be explained by training data: The performance of backtracking model is constrained by the quality of the backtracking traces used for training. 
As previously seen in Fig. 4 (right), the 17M backtracking model is already approaching the performance ceiling that is set by the training data. Training larger models on the same backtracking data would not lead to further performance improvements. Due", + "bbox": [ + 169, + 797, + 826, + 925 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "to different scaling behaviors between backtracking and direct solution models, the gap in performances between two types of models widens with increasing model sizes (Fig. 5 C).", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Sudoku. Similar to CountDown, the performances of direct solution models improve with increased model sizes (Fig. 5 E). Unlike CountDown, however, the backtracking model also significantly benefits from scaling (Fig. 5 D). This difference can again be explained by examining the backtracking training data. Sodomu is inherently more complex than CountDown. The DFS backtracking traces successfully solve $97\\%$ of test boards—far exceeding the current performance of all four tested model sizes. Because the backtracking model for Sodomu has not yet reached training data performance ceiling, increased model capacity leads to improved results. On the other hand, due to the complexity and large search space of the game, the backtracking models' performance gains start to diminish as the search traces become longer. As a result, the backtracking model consistently outperforms the direct solution model across scales, but the advantages diminishes at larger compute budgets (Fig. 
5 E).", + "bbox": [ + 169, + 140, + 826, + 295 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 GRPO: Learning beyond the imitation game", + "text_level": 1, + "bbox": [ + 169, + 308, + 602, + 327 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "So far, we have shown that under supervised learning, backtracking is not always optimal for scaling test-time compute. We now explore how further training both backtracking and direct solution models with reinforcement learning leads to qualitatively different outcomes.", + "bbox": [ + 169, + 333, + 828, + 378 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.1 Continue training models with GRPO", + "text_level": 1, + "bbox": [ + 169, + 386, + 496, + 402 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Recently, RL has become a popular approach to further enhance LLMs performance on challenging benchmarks such as MATH (Hendrycks et al., 2021) and AIME (AIME, 2024). Here, we study the effects of RL in a controlled setting, focusing on how it impacts a model's backtracking behaviors (sequential search) and as well as a model's parallel search capability (sampling with best-of- $n$ ). We take the CountDown backtracking and direct solution models, which have been trained to convergence under the supervised learning objective (see Appendix G.3 for training curves). We then continue training each model using GRPO (Shao et al., 2024), following verl's (Sheng et al., 2024) implementation. We perform GRPO on the same training data used for the supervised learning. 
As before, we evaluate performance across different test-time compute budgets.", + "bbox": [ + 169, + 405, + 826, + 546 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.2 Backtracking model discovers new search strategies", + "text_level": 1, + "bbox": [ + 169, + 556, + 598, + 571 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 1 C shows that the backtracking model post GRPO sees an performance boost across all test-compute budgets. The post-GRPO model (dark red) reaches an accuracy comparable to the pre-GRPO direct solution model (light blue). This improvement is surprising for two reasons: (1) at maximum compute (4096 tokens), the model solves nearly $70\\%$ of the test set—exceeding the performance of the DFS strategy used to generate training data $(57\\%)$ ; and (2) the model was trained on questions it has already seen during supervised learning, with no new problems introduced during GRPO.", + "bbox": [ + 169, + 574, + 826, + 674 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "These gains suggest that the backtracking model, once freed from the constraints of predicting next token on DFS traces, can now discover better search strategies. To concretely show that the backtracking model post-GRPO learns search strategies different from DFS training traces, we revisit the mistake-counting analysis from Section 4.2.1 (Figure 3). For each test problem, we compute the number of mistakes as before (i.e., counting how many incorrect terminal nodes are explored before reaching a correct solution). Using the same set similarity measure as before, we quantify the strategy deviation in Figure 6 (left). The smaller Jaccard index values confirm that through GRPO, the backtracking model has learned new and more effective search behaviors. 
In Appendix G.4, we also show the per-problem scatter plot as done in Figure 3.", + "bbox": [ + 169, + 684, + 826, + 825 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6.3 Direct solution model specializes at pass@1", + "text_level": 1, + "bbox": [ + 169, + 834, + 535, + 849 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We now show that compared to backtracking models, GRPO has remarkably different effects on direct solution models. As shown in Figure 1 C, the direct solution model post-GRPO achieves strong performance at the smallest compute budget (pass@1), solving $42.5\\%$ of unseen CountDown puzzles (82 out of 200 test problems). None of the handcrafted search strategies (Appendix G.1) can reach such high accuracy. To understand the impressive gain", + "bbox": [ + 169, + 853, + 825, + 926 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/74d0920a8d16477d6e33f418d0398f280da866ebfc2cffcf91324a04375b76a2.jpg", + "image_caption": [ + "Figure 6: GRPO has different effect on backtracking versus direct solution model Left: After GRPO, the backtracking model's search strategy starts to deviate away from the DFS search. Right: For problems the pre-GRPO direct solution model (blue) have a non-zero pass@k solving probabilities, the post-GRPO direct solution model (red) solves with pass@1." 
+ ], + "image_footnote": [], + "bbox": [ + 178, + 99, + 509, + 263 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/58fd3a6281a1bb9dfa15bece1a73c9939603a56ca0f044b45ec84822d61fe0f3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 545, + 101, + 818, + 263 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "on 1-shot performance, we examine those 82 problems, and discover that the pre-GRPO direct solution model was able to find correct solution by sampling best-of- $n$ (with $n \\leq 64$ ). We now examine a model's solving probabilities (i.e., measuring pass@k rate out of the 64 generations). We compare the pass@k rate for the diret solution model pre and post GRPO, shown in Figure 6, right. We rank the 200 test problems by the pre-GRPO model's solving probabilities. For problems that the pre-GRPO model has a non-zero pass@k rate, the post-GRPO model can solve most of them with pass@1.", + "bbox": [ + 169, + 359, + 826, + 460 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "However, this improvement in 1-shot performance comes with a substantial trade-off: the model loses its ability to generate diverse solutions. As a result, when we perform parallel search using best-of- $n$ , the direct solution model post-GRPO fail to explore different solution paths, hurting its test-time-scaling effectiveness. 
Therefore, test-time compute scaling becomes ineffective as we increase compute budgets, forming a sharp contrast to the backtracking model's consistent improvements across the full compute budget.", + "bbox": [ + 169, + 469, + 823, + 556 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "7 Conclusion and discussions", + "text_level": 1, + "bbox": [ + 171, + 566, + 454, + 583 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this work, we conducted a controlled empirical investigation into the efficacy of teaching backtracking to large language models (LLMs) as a method for scaling test-time computation. Using two strategic games, CountDown and Sudoku, we demonstrated that backtracking does not universally outperform parallel solution strategies; rather, its effectiveness depends significantly on task characteristics, model scale, and training approach. Appendix A, we show that our results in synthetic setting generalize: even in real-world reasoning tasks, backtracking is not always beneficial. Additionally, our reinforcement learning experiments uncovered a unique synergy between backtracking capabilities and RL-based training, enabling models to discover novel strategies.", + "bbox": [ + 169, + 590, + 826, + 717 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Limitations and future work. While our experiments relied on two strategic games (Count-Down and Sudo) and models trained from scratch—common practices for controlled studies—an important avenue for future research is extending our findings to complex, real-world reasoning tasks such as coding and mathematical problem-solving. For future work, developing precise ways to characterize tasks that benefit from backtracking will be valuable for guiding model training. 
Finally, while we intentionally created a dichotomy between pure backtracking and direct-solution models, real-world applications may require hybrid strategies that dynamically choose between direct generation and explicit search based on problem complexity. Investigating whether LLMs can autonomously optimize their reasoning modes, particularly through reinforcement learning paradigms, is a promising future direction for improving the flexibility and efficiency of model reasoning.", + "bbox": [ + 169, + 724, + 826, + 878 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 171, + 102, + 346, + 119 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We thank Core Francisco Park and Bingbin Liu for helpful discussions and feedback throughout the development of this work. TQ and DAM acknowledge support from the Kempner Institute, the Aramont Fellowship Fund, and the FAS Dean's Competitive Fund for Promising Scholarship.", + "bbox": [ + 169, + 125, + 826, + 184 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 196, + 274, + 212 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "AIME. American invitational mathematics examination, 2024. URL https://maa.org/math-competitions/american-invitational-mathematics-examination-aime.", + "Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebrón, and Sumit Sanghai. GQA: Training generalized multi-query transformer models from multi-head checkpoints. arXiv [cs.CL], 2023. 
URL http://arxiv.org/abs/2305.13245.", + "Afra Feyza Akyurek, Ekin Akyurek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. RL4F: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.08844.", + "Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan D Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2408.11791.", + "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.21787.", + "Sehyun Choi, Tianqing Fang, Zhaowei Wang, and Yangqiu Song. KCTS: Knowledge-constrained tree search decoding with token-level hallucination detection. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2310.09044.", + "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z F Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J L Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, 
Runji Wang, R J Chen, R L Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S S Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W L Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X Q Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y K Li, Y Q Wang, Y X Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong. Ying He,Yishi Piao,Yisong Wang,Yixuan Tan,Yiyang Ma,Yiyuan Liu,Yongqiang Guo. Yuan OuYuduan Wang Yue Gong.Yuheng ZouYujia HeYunfan XiongYuxiang Luo. Yuxiang YouYuxuan LiuYuyang ZhouYX ZhuYanhong Xu,Yanping Huang,Yaohui Li,Yi Zheng,Yuchen Zhu,Yunxian Ma,Ying Tang,Yukun Zha,Yuting YanZ Z Ren Zehui Ren,Zhangli Sha,Zhe Fu,Zhean XuZhenda XieZhengyan Zhang,Zhenwen Hao. Zhicheng Ma,Zhigang Yan,Zhiyu WuZihui GuZijia ZhuZijun LiuZilin LiZiwei Xie" + ], + "bbox": [ + 173, + 223, + 826, + 924 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang, and Zhen Zhang. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.12948.", + "Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D Goodman. Stream of search (SoS): Learning to search in language. 
arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2404.03683.", + "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective STaRs. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2503.01307.", + "Alex Havrilla, Sharath Raparthy, Christoforus Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLoRe: When, where, and how to improve LLM reasoning via global and local refinements. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.10963.", + "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. arXiv [cs.LG], 2021. URL http://arxiv.org/abs/2103.03874.", + "Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling. arXiv [cs.LG], 2025. URL http://arxiv.org/abs/2501.11651.", + "Robert Irvine, Douglas Boubert, Vyas Raina, Adrian Liusie, Ziyi Zhu, Vineet Mudupalli, Aliaksei Korshuk, Zongyi Liu, Fritz Cremer, Valentin Assassi, Christie-Carol Beauchamp, Xiaoding Lu, Thomas Rialan, and William Beauchamp. Rewarding chatbots for real-world engagement with millions of users. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2303.06135.", + "Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. VinePPO: Unlocking RL potential for LLM reasoning through refined credit assignment. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2410.01679.", + "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal Behbahani, and Aleksandra Faust. 
Training language models to self-correct via reinforcement learning. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.12917.", + "Ariel N Lee, Cole J Hunter, and Nataniel Ruiz. Platypus: Quick, cheap, and powerful refinement of LLMs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.07317.", + "Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper LLM thinking. arXiv [cs.AI], 2025. URL http://arxiv.org/abs/2501.09891.", + "Noam Levi. A simple model of inference scaling laws. arXiv [stat.ML], 2024. URL http://arxiv.org/abs/2410.16377.", + "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2305.20050.", + "Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2309.15028.", + "Jieyi Long. Large language model guided tree-of-thought. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2305.08291." + ], + "bbox": [ + 171, + 102, + 826, + 922 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. arXiv [cs.CL], 2024. 
URL http://arxiv.org/abs/2406.06592.", + "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.19393.", + "Aleksei Maslakov and Papadimas. Sudoku solver with step-by-step guidance, 2023. URL https://github.com/unmade/dokusan.", + "Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2304.01904.", + "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive IntroSpEction: Teaching language model agents how to self-improve. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.18219.", + "David Radcliffe. 3 million sudoku puzzles with ratings. Website, 2020. URL https://www.kaggle.com/datasets/radcliffe/3-million-sudoku-puzzles-with-ratings.", + "William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2206.05802.", + "Kulin Shah, Nishanth Dikkala, Xin Wang, and Rina Panigrahy. Causal language modeling can elicit search and reasoning capabilities on logic puzzles. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.10502.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, Y K Li, Y Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.03300.", + "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. HybridFlow: A flexible and efficient RLHF framework. arXiv preprint arXiv: 2409.19256, 2024.", + "Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. 
RoFormer: Enhanced transformer with rotary position embedding. arXiv [cs.CL], 2021. URL http://arxiv.org/abs/2104.09864.", + "Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9426-9439, Stroudsburg, PA, USA, 2024. Association for Computational Linguistics. URL http://dx.doi.org/10.18653/v1/2024.acl-long.510.", + "Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.04592.", + "Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2211.00053.", + "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv [cs.AI], 2024. URL http://arxiv.org/abs/2408.00724." + ], + "bbox": [ + 171, + 102, + 826, + 925 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. arXiv [cs.CL], 2023. 
URL http://arxiv.org/abs/2305.00633.", + "Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. DeepSeek-prover: Advancing theorem proving in LLMs through large-scale synthetic data. arXiv [cs.AI], 2024. URL http://arxiv.org/abs/2405.14333.", + "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024.", + "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. arXiv [cs.CL], 2023a. URL http://arxiv.org/abs/2305.10601.", + "Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, Ran Xu, Phil Mui, Huan Wang, Caiming Xiong, and Silvio Savarese. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv [cs.CL], 2023b. URL http://arxiv.org/abs/2308.02151.", + "Seonghyeon Ye, Yongrae Jo, Doyoung Kim, Sungdong Kim, Hyeonbin Hwang, and Minjoon Seo. SelFee: Iterative self-revising LLM empowered by self-feedback generation. Blog post, 2023. URL https://kaistai.github.io/SelFee/.", + "Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah D Goodman. STaR: Bootstrapping reasoning with reasoning. arXiv [cs.LG], 2022. URL http://arxiv.org/abs/2203.14465.", + "Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B Tenenbaum, and Chuang Gan. Planning with large language models for code generation. 
arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2303.05510.", + "Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2404.17140.", + "Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning acting and planning in language models. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2310.04406." + ], + "bbox": [ + 171, + 102, + 826, + 693 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/c3231bcd3f7a4377e0bb50797ec0d78ce075ede1cd66eeda2dce0e11b6a62c45.jpg", + "image_caption": [ + "Math-500 Performance vs. Test Compute" + ], + "image_footnote": [], + "bbox": [ + 212, + 117, + 483, + 276 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/90573dfe71e77f9da3d2327c2e68b2211bc164de4f3632147e1b10f7612d2631.jpg", + "image_caption": [ + "GPQA Performance vs. Test Compute", + "Figure 7: Evaluating backtracking on real LLMs. Left: On MATH-500, we compare the S1 model (fine-tuned on backtracking traces) using sequential decoding with budget forcing, against its base model (Qwen2.5-32B-Instruct) using parallel sampling. The backtracking model underperforms at low compute but narrows the gap at higher budgets. Right: On GPQA, the same backtracking setup outperforms parallel sampling in a multiple-choice reasoning setting. This comparison generalizes our conclusion from synthetic settings to real LLMs." 
+ ], + "image_footnote": [], + "bbox": [ + 506, + 116, + 777, + 275 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A Backtracking Analysis on Math Reasoning with LLMs", + "text_level": 1, + "bbox": [ + 171, + 411, + 694, + 430 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.1 Experimental Setup", + "text_level": 1, + "bbox": [ + 171, + 436, + 366, + 453 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To complement our synthetic experiments, we conduct an evaluation on real-world math problems to examine whether backtracking remains effective under equal test-time compute. We compare two approaches:", + "bbox": [ + 169, + 455, + 826, + 500 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Backtracking model: fine-tuned on solution traces that include explicit self-correction and step-by-step reflection.", + "- Direct solution model: the base model without backtracking fine-tuning, using parallel sampling (with majority voting for final correct answer) at inference." + ], + "bbox": [ + 174, + 511, + 823, + 570 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To control test-time compute, we use the budget forcing technique introduced in (Muen-nighoff et al., 2025). This enables a fair comparison across models with differing reasoning styles.", + "bbox": [ + 169, + 583, + 826, + 627 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Backtracking Model. We adopt the S1 checkpoint from Muennighoff et al. (2025), a model trained on solution traces distilled from DeepSeekR1. These traces exhibit explicit backtracking behaviors—identifying and correcting earlier mistakes. We generate outputs with temperature $T = 0.7$ under budget forcing and evaluate on the MATH-500.", + "bbox": [ + 169, + 633, + 825, + 691 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Direct Solution Model. 
For fair comparison, we use the same base model as S1—Qwen2.5-32B-Instruct—without backtracking fine-tuning. We sample $N = 1$ to 8 completions with temperature $T = 0.7$ , and report both Best-of- $N$ and Majority Vote accuracy.", + "bbox": [ + 169, + 699, + 828, + 743 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Results and Interpretation", + "text_level": 1, + "bbox": [ + 171, + 752, + 372, + 768 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 7 (left) presents accuracy under matched compute budgets. We observe that at low compute budgets the backtracking model underperforms due to its verbose reasoning traces. At higher budgets, backtracking matches and slightly exceeds the performance of parallel sampling. This mirrors trends observed in the CountDown (Section 4.3), and suggests that while backtracking introduces overhead, it yields benefits when sufficient compute is available.", + "bbox": [ + 169, + 771, + 825, + 856 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "To form a sharp contrast, we reproduce results from (Muennighoff et al., 2025) on GPQA-Diamond (Figure 7, right), which shows that the same backtracking model significantly outperforms parallel sampling—even at lower budgets—in a multiple-choice setting. This contrast highlights that the effectiveness of backtracking is task-dependent.", + "bbox": [ + 169, + 867, + 826, + 925 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This real-world evaluation supports our synthetic findings: backtracking improves performance under compute constraints, but its advantage depends on the task structure. On open-ended math problems, the benefit is most pronounced at higher budgets. 
On structured tasks like multiple-choice QA, gains can appear even earlier. Overall, our conclusions generalize beyond synthetic settings.", + "bbox": [ + 169, + 103, + 826, + 176 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B Related Work Extended", + "text_level": 1, + "bbox": [ + 171, + 186, + 419, + 203 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.1 Test-time computation scaling", + "text_level": 1, + "bbox": [ + 171, + 210, + 441, + 227 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A growing body of work has explored how to improve language model performance by scaling test-time computation. These approaches typically fall into two broad categories: parallel and sequential search. Parallel methods sample multiple solutions independently and select the best one using predefined criteria—such as majority voting or external reward models—as seen in Best-of- $N$ techniques (Brown et al., 2024; Irvine et al., 2023; Levi, 2024). These methods often rely on outcome-based reward models that score complete solutions (Xin et al., 2024; Ankner et al., 2024).", + "bbox": [ + 169, + 229, + 826, + 328 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In contrast, sequential methods iteratively refine reasoning by conditioning on previous attempts. This class includes stepwise improvement methods(Ankner et al., 2024; Hou et al., 2025; Lee et al., 2025), where each new trajectory builds on earlier outputs, enabling the model to adapt its reasoning dynamically. Other research works have also explored using the search process itself to improve model reasoning capabilities, either during inference or by integrating the feedback into training(Wang et al., 2024; Luo et al., 2024). 
While these methods can reduce redundancy, they typically require more compute per sample and may suffer from compounding errors.", + "bbox": [ + 169, + 339, + 826, + 454 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Tree-based approaches, such as Monte Carlo Tree Search (MCTS) and guided beam search, represent a hybrid between parallel and sequential strategies(Gandhi et al., 2024; Liu et al., 2023; Zhang et al., 2023; Zhou et al., 2023; Choi et al., 2023; Xie et al., 2023). These methods often leverage process reward models, which assign value to intermediate reasoning steps rather than full outputs(Lightman et al., 2023; Wang et al., 2024; Wu et al., 2024). REBASE(Wu et al., 2024), for example, uses a process reward model to guide exploration and pruning in tree search, and has been shown to outperform both sampling-based methods and traditional MCTS.", + "bbox": [ + 169, + 462, + 826, + 575 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.2 Self-correction and backtracking", + "text_level": 1, + "bbox": [ + 171, + 584, + 459, + 599 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Search and backtracking are inherently tied to self-correction, as they enable models to revisit earlier decisions and recover from errors—a critical capability for multi-step reasoning. Teaching language models to self-correct has been approached through fine-tuning on revision demonstrations from humans or stronger models(Saunders et al., 2022; Ye et al., 2023; Qu et al., 2024), as well as through synthetic data generation and handcrafted augmentation(Paul et al., 2023; Wang et al., 2023; Lee et al., 2023). Reward-based methods provide another avenue, using outcome- or process-level signals to differentiate good and bad reasoning trajectories, often framed as implicit policy learning(Welleck et al., 2022; Akyurek et al., 2023; Zhang et al., 2024). 
Some methods further incorporate search, critique generation, or separate correction modules to enhance reasoning quality(Yao et al., 2023b; Havrilla et al., 2024). In contrast, using two structured games, we investigate the tradeoffs of teaching models to backtrack via search traces versus allowing them to learn purely from correct solutions.", + "bbox": [ + 169, + 602, + 828, + 784 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B.3 Reinforcement learning for LLM reasoning", + "text_level": 1, + "bbox": [ + 171, + 792, + 537, + 809 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Reinforcement learning (RL) has emerged as a powerful framework for improving the reasoning abilities of language models. While early work applied off-policy and on-policy RL methods to guide models toward verifiable outcomes(Zelikman et al., 2022; Kazemnejad et al., 2024), recent approaches have shown that even simplified algorithms like GRPO can lead to significant performance gains and the emergence of in-context search behavior (DeepSeek-AI et al., 2025; Shao et al., 2024; DeepSeek-AI et al., 2025). These advances suggest that RL can help models autonomously discover more effective reasoning strategies, even without explicit reward models or structured search. However, not all models benefit", + "bbox": [ + 169, + 811, + 826, + 926 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 948, + 508, + 960 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "equally from RL, and it remains unclear what properties make a model amenable to learning through reinforcement. 
Our work contributes to this question by examining how backtracking models, when trained with GRPO, can discover novel solution strategies—while no-backtracking models show limited or mixed gains.", + "bbox": [ + 169, + 103, + 826, + 161 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C Experiment details", + "text_level": 1, + "bbox": [ + 169, + 175, + 380, + 193 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.1 Additional details on game, data generation", + "text_level": 1, + "bbox": [ + 169, + 200, + 542, + 217 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "CountDown tree size computation. CountDown has an exponentially growing search space with respect to the number of candidate numbers. If the current state has $N$ available numbers, there are $\\binom{N}{2} \\times 4$ possible actions (selecting a pair and one of four operations), and the depth of the tree is $N - 1$ . For games with four candidate numbers, the complete search tree contains 1,152 nodes.", + "bbox": [ + 169, + 220, + 826, + 294 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "CountDown search. To generate DFS search data, we use a sum heuristic to guide the search order and prune nodes. This heuristic measures the distance between the sum of all input numbers and the target number, and prunes a node if the absolute distance exceeds the target. This approach is inspired by Gandhi et al. (2024), who also consider an alternative—the multiply heuristic—which measures the minimum distance between the input set and the factors of the target. However, in our experiments, both heuristics yield similar performance: for a fixed number of visited nodes, DFS with either heuristic solves approximately the same number of games.", + "bbox": [ + 169, + 304, + 823, + 417 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Sudoku rule. In a Sudoku game, the player is given a $9 \\times 9$ grid in which each cell must be filled with a digit from 1 to 9. 
The puzzle is subject to three constraints: each row, each column, and each of the nine $3 \\times 3$ subgrids must contain all digits from 1 to 9 exactly once. Given a partially filled grid, the objective is to fill in the remaining cells such that all constraints are satisfied.", + "bbox": [ + 169, + 426, + 823, + 497 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Sudoku data and tokenization. To represent theiku board for language models, we encode each cell as a position-value pair: $(x,y) = v$ , where $(x,y)$ denotes the grid location and $v$ is the cell's value. The model receives the initial board as a list of known $(x,y) = v$ pairs and generates the solution by predicting the values for the remaining cells. We generate backtracking traces by serializing the full DFS traversal. For the direct solution model, we prune each trace to include only the final solution path.", + "bbox": [ + 169, + 507, + 826, + 592 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Scoring. For CountDown, a solution is correct only if it adheres to game rules and correctly achieves the target number. For Selenium, correctness requires fully solving the board, with no partial credit given for incomplete but correct boards. Models are tested on 200 unseen problems per game. The same scoring function is used as the reward function in GRPO (Section 6)", + "bbox": [ + 169, + 601, + 823, + 672 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.2 Additional details on model architecture", + "text_level": 1, + "bbox": [ + 169, + 683, + 519, + 698 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Model hyperparameters can be found in Table 1.", + "bbox": [ + 169, + 703, + 524, + 719 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/6d58a67cb14836a8528234812303ed4b872557173c0aaf09e6fd2050fe82eb1f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model SizeHidden SizeLayersAttn HeadsIntermediate SizeKV Heads
3M256645121
17M5128410241
38M51210820482
144M102412830722
", + "bbox": [ + 187, + 734, + 810, + 821 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 1: Qwen2.5-style architecture configurations for the four model sizes used in our experiments.", + "bbox": [ + 169, + 830, + 823, + 859 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "C.3 Training hyperparameter", + "text_level": 1, + "bbox": [ + 169, + 875, + 406, + 891 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Training hyperparameters can be found in Table 2. We train all models on 2 NVIDIA H100 80GB HBM3 GPUs.", + "bbox": [ + 169, + 893, + 823, + 922 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/69ac0e6e576a49a10af498ffd099c72ba54c2b238e83d3d324692aeb4a963b1e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
HyperparameterValue
Optimization
Learning rate1 × 10-5
Weight decay0.01
Learning Rate Schedule
Scheduler typeCosine
Warmup steps1
Training Setup
Epochs30
Batch size (backtracking model)32
Batch size (direct solution model)64
Context length (backtracking model)4096
Context length (direct solution model)512
Tokenizer
Tokenizer size (CountDown)74
Tokenizer size (Sudoku)110
", + "bbox": [ + 316, + 101, + 679, + 340 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 2: Training hyperparameters used for all experiments. Batch size and context length vary based on model type.", + "bbox": [ + 169, + 349, + 823, + 380 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D FLOP computation", + "text_level": 1, + "bbox": [ + 171, + 402, + 382, + 420 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "To compare backtracking and direct solution models under a fixed compute budget, we estimate inference FLOPs based on model architecture and generation length $T$ . We use a simplified transformer FLOP computation that accounts for per-token operations across all layers.", + "bbox": [ + 169, + 426, + 826, + 482 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Below is a list of architectural and generation parameters:", + "bbox": [ + 171, + 493, + 591, + 510 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- $d_{\\mathrm{model}}$ : hidden dimension", + "$d_{\\mathrm{kv}}$ : key/value dimension", + "- $d_{\\mathrm{ff}}$ : intermediate (feedforward) dimension", + "- $L$ : number of layers", + "- $T$ : number of generated tokens (i.e., context length)", + "- $N$ : number of sequences generated (e.g., in best-of- $N$ sampling)" + ], + "bbox": [ + 176, + 520, + 653, + 619 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "D.1 Step-by-step FLOPs Calculation", + "text_level": 1, + "bbox": [ + 171, + 627, + 457, + 643 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "1. Per-layer linear FLOPs per token. 
We break down the linear FLOPs for each transformer layer into attention and MLP components:", + "bbox": [ + 169, + 646, + 823, + 676 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Self-attention:", + "text_level": 1, + "bbox": [ + 176, + 686, + 303, + 700 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Query projection: $d_{\\mathrm{model}} \\times d_{\\mathrm{model}}$", + "- Key projection: $d_{\\mathrm{model}} \\times d_{\\mathrm{kV}}$", + "- Value projection: $d_{\\mathrm{model}} \\times d_{\\mathrm{kv}}$", + "- Output projection: $d_{\\mathrm{model}} \\times d_{\\mathrm{model}}$" + ], + "bbox": [ + 209, + 700, + 473, + 758 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This results in a total of:", + "bbox": [ + 192, + 758, + 370, + 770 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {F L O P s} _ {\\text {a t t e n t i o n - l i n e a r}} = 2 d _ {\\text {m o d e l}} ^ {2} + 2 d _ {\\text {m o d e l}} d _ {\\mathrm {k v}}\n$$\n", + "text_format": "latex", + "bbox": [ + 352, + 773, + 661, + 792 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "MLP (Feedforward):", + "text_level": 1, + "bbox": [ + 176, + 795, + 348, + 808 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "MLP layers include following components:", + "bbox": [ + 192, + 810, + 506, + 824 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Gate projection", + "- Up projection", + "- Down projection" + ], + "bbox": [ + 197, + 824, + 336, + 869 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Each of these MLP layers costs: $d_{\\mathrm{model}} \\times d_{\\mathrm{ff}}$ , giving:", + "bbox": [ + 192, + 869, + 566, + 885 + ], + "page_idx": 17 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {F L O P s} _ {\\mathrm {m l p}} = 3 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {f f}}\n$$\n", + "text_format": "latex", + "bbox": [ + 
424, + 888, + 593, + 906 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 31, + 517, + 47 + ], + "page_idx": 17 + }, + { + "type": "page_footnote", + "text": "${}^{2}$ key/value dimension is different from hidden dimension because of GQA (Ainslie et al., 2023)", + "bbox": [ + 189, + 907, + 813, + 925 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Combining both components, the total per-token linear cost per layer is:", + "bbox": [ + 169, + 103, + 694, + 119 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {F L O P s} _ {\\mathrm {l i n e a r}} = 2 d _ {\\mathrm {m o d e l}} ^ {2} + 2 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {k v}} + 3 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {f f}}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 127, + 671, + 147 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "2. Quadratic attention cost. Self-attention involves computing interactions between all token pairs, resulting in a quadratic cost:", + "bbox": [ + 169, + 161, + 823, + 191 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {F L O P s} _ {\\text {a t t e n t i o n}} = d _ {\\text {m o d e l}} \\cdot \\frac {T (T + 1)}{2}\n$$\n", + "text_format": "latex", + "bbox": [ + 370, + 198, + 622, + 229 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "3. Total generation cost per sequence. Each token attends to all previous tokens across all $L$ layers. 
The generation cost for a single sequence is:", + "bbox": [ + 169, + 244, + 823, + 273 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {F L O P s} _ {\\mathrm {g e n}} = L \\cdot \\left(\\mathrm {F L O P s} _ {\\mathrm {l i n e a r}} \\cdot T + \\mathrm {F L O P s} _ {\\mathrm {a t t e n t i o n}}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 318, + 281, + 676, + 299 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "4. Total inference FLOPs. For $N$ sequences (e.g., best-of- $N$ sampling), the total inference cost is:", + "bbox": [ + 169, + 313, + 823, + 340 + ], + "page_idx": 18 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {F L O P s} _ {\\text {t o t a l}} = N \\cdot \\mathrm {F L O P s} _ {\\text {g e n}}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 340, + 598, + 359 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We do not include auxiliary operations such as token embedding and softmax, weight norm, as their contribution is negligible compared to the transformer layers. All FLOPs reported in our experiments use this formula, with model configurations listed in Table 1.", + "bbox": [ + 169, + 373, + 826, + 417 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "E Majority voting versus best-of-n", + "text_level": 1, + "bbox": [ + 171, + 429, + 496, + 446 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In this work, we primarily use the best-of- $n$ metric to evaluate the direct solution model. This metric is suitable for tasks where verifying the correctness of a solution is trivial, whereas solving the task itself is challenging. Many real-world problems, such as coding tasks and combinatorial optimization, fall into this category. 
Conversely, for problems where verification is difficult, metrics such as majority voting may be more appropriate.", + "bbox": [ + 169, + 453, + 826, + 526 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "To illustrate this point, we additionally evaluate the CountDown direct solution model using both metrics in Figure 8. For majority voting, we generate n solutions per test problem, select the most frequently occurring solution (breaking ties randomly), and evaluate its correctness.", + "bbox": [ + 169, + 535, + 826, + 592 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "We find that the majority-voting performance closely approximates the direct solution model's one-shot accuracy (i.e., best-of- $n$ with $n = 1$ ). However, majority voting is less suitable for our task for several reasons. First, the CountDown game frequently has multiple correct solutions, so selecting the majority solution path can fail to detect cases where the model generates different but equally valid solutions. Second, while majority voting is appropriate in real-world LLM scenarios—such as mathematical reasoning—where distinct solution paths converge to the same final boxed answer, in our synthetic setting, where models are trained from scratch, majority voting essentially becomes a noisy proxy for greedy decoding (sampling at temperature $T = 0$ ). 
Thus, we expect and observe majority voting accuracy to closely track pass@1 accuracy.", + "bbox": [ + 169, + 603, + 826, + 744 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "In summary, given the characteristics of our task and the controlled experimental setup, best-of- $n$ remains a valid and preferred metric for evaluating direct solution models.", + "bbox": [ + 169, + 753, + 826, + 785 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "F Dependence on depth of the search tree", + "text_level": 1, + "bbox": [ + 169, + 796, + 560, + 815 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "F.1 Search tree depth", + "text_level": 1, + "bbox": [ + 171, + 821, + 344, + 837 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Why do backtracking models perform well on Selenium but underperform on CountDown, even when both are trained on DFS search traces? We argue that task characteristics—particularly those beyond our control in real—world settings—play a key role in determining whether backtracking is test-time-compute-efficient. A major difference between the two games lies in the depth of their search trees (Figure 2). In hard Selenium puzzles, only 20 out of 81 cells are pre-filled, leaving 50–60 cells to solve. This results in deep search trees with", + "bbox": [ + 169, + 839, + 828, + 926 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "image", + "img_path": "images/932547a1aae0442be0b8ed55b64d894efab339f8a1e422c141b2a42aeb51fdfc.jpg", + "image_caption": [ + "Figure 8: Majority voting versus best-of- $n$ for CountDown direct solution model. For CountDown, verification is much easier than solving the problem. Therefore, best-of- $n$ as a performance is justified. 
Additionally, we also examine majority voting performance. However, CountDown solutions are not unique, majority voting is not the most suitable way to measure model performances." + ], + "image_footnote": [], + "bbox": [ + 344, + 107, + 653, + 251 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/c0806c8c2a5e96f1f7a0026e9c74be9031e96165b9ef739ed5759bc84f66787e.jpg", + "image_caption": [ + "Figure 9: The efficacy of backtracking depends on search tree depth. Left: We introduce a variation of the CountDown game—stacked CountDown—to increase the search tree depth. In the original CountDown game (solid bars), the direct solution model consistently outperforms the backtracking model, shown by a positive performance gap. In the stacked version (slanted bars), this gap is significantly reduced or even reversed, indicating that backtracking becomes more compute-efficient at greater depths. Right: We introduce a variation of Sudoku—easy Sudoku—where the initial board has more pre-filled cells to decrease the search tree depth. In the original setting, the 38M direct solution model (bottom, solid bars) underperforms the backtracking model. In the shallow Sudoku variant (slanted bars), the performance gap narrows across compute budgets. For the 17M models (top), the results are less conclusive." + ], + "image_footnote": [], + "bbox": [ + 227, + 356, + 488, + 542 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/f6b5c72489b87d26c48b44d4f55f9ca147976d6fd1d61acc75e1b1ed0f2b685f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 356, + 769, + 542 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "extensive trial-and-error, with many backtracking steps. In contrast, CountDown (in our setup) uses 4 candidate numbers, limiting the search tree depth to just 3. 
We hypothesize that backtracking models excels at tasks with deeper search trees, while shallow trees make parallel strategies (i.e., direct solution model) more effective. To test this, we design a variant of CountDown with increased search depth and a variant of Sudoku with reduced depth.", + "bbox": [ + 169, + 737, + 823, + 809 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "F.2 A deeper CountDown", + "text_level": 1, + "bbox": [ + 171, + 820, + 375, + 835 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Set up To increase the search tree depth in CountDown, one might naively scale up the number of candidate numbers. However, this approach quickly leads to exponential growth in tree width: with 4 candidates, the tree contains 1,152 nodes; with 5 candidates, it grows to 46,080. To prevent the exponential growth in the number of search paths, we design a stacked CountDown variant that increases depth while controlling tree width. In this setup, the player is given 8 candidate numbers and a final target. The first 4 numbers must be used", + "bbox": [ + 169, + 839, + 826, + 925 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "to reach the 5th number (\"a partial goal\"), and the remaining 4 numbers must then be used to reach the final target. This effectively stacks two CountDown problems, increasing depth without combinatorial explosion. We generate training data for both backtracking and no-backtracking models following the same procedure as in Section 3.1.2, with examples provided in Appendix H (Figure 16). 
We train a 17M as well as a 38M model until validation loss has converged, and test on 200 unseen problems.", + "bbox": [ + 169, + 103, + 826, + 189 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Results In Figure 9 (left), we compare the performance gap between the direct solution model and the backtracking model, measured by the difference in test accuracy. In the original CountDown setting (solid bars), the direct solution model consistently outperforms the backtracking model across all test compute budgets. However, in the stacked CountDown variant (slanted bars), the performance gap narrows significantly—and in some cases, reverses. The sign reverse indicates the backtracking model now outperforms the direct solution model. These results support our hypothesis: in CountDown, backtracking becomes more compute-efficient as the search tree depth increases. We observe this trend across both 17M and 38M models.", + "bbox": [ + 169, + 200, + 826, + 325 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "F.3 A shallower Selenium", + "text_level": 1, + "bbox": [ + 171, + 340, + 366, + 354 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Set up To reduce the search tree depth in Selenium, we generate easier boards by increasing the number of initially filled cells. Specifically, we take the original 3M Selenium dataset Radcliffe (2020) and apply the direct solution model (Section 3.2.2) to correctly fill 10 additional cells. This increases the average number of pre-filled cells from 20 to around 30, effectively decreasing search tree depth. We generate both backtracking and direct solution training data following the same procedure in Section 3.2.2. 
Models with 17M and 38M parameters are trained to convergence and evaluated on 200 unseen problems.", + "bbox": [ + 169, + 361, + 826, + 460 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Results In Figure 9 (right), we show the performance gap between the direct solution and backtracking models, measured by the difference in test accuracy. In the original (hard) Sudoku setting, the 38M direct solution model consistently underperforms the backtracking model, as indicated by the negative gaps (solid green bars). In the shallow-Sudoku variant (slanted bars), these gaps are reduced across all test-time compute budgets for the 38M model. The trend is less clear for the 17M model, where the performance difference remains small in both settings. Overall, these results support our hypothesis: in Sudoku, backtracking becomes more test-time-compute-efficient when the search tree is deeper.", + "bbox": [ + 169, + 472, + 826, + 585 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G Additional results", + "text_level": 1, + "bbox": [ + 171, + 602, + 375, + 618 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G.1 Exploring different CountDown strategies", + "text_level": 1, + "bbox": [ + 171, + 628, + 532, + 645 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "We analyze different search strategies for CountDown, including DFS and BFS with varying beam widths. For each strategy, we tokenize the resulting backtracking trace and measure number of tokens used in each search trace. The goal is to identify which strategy that finds correct solutions with the fewest tokens (Figure 10). The results show no clear winner. BFS with a smaller beam width produces shorter traces by exploring fewer nodes, but this comes at the cost of missing correct solutions more frequently. 
Increasing the beam width improves solution coverage but leads to longer traces due to broader exploration.", + "bbox": [ + 169, + 650, + 826, + 750 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "In contrast, DFS produces more uniformly distributed trace lengths but suffers from a specific failure mode: it may prune the correct path early and terminate prematurely. These failures appear as short but incorrect traces, visible as the left-most orange bars in Figure 10 (bottom).", + "bbox": [ + 169, + 762, + 825, + 820 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "G.2 Compare think-backtrack and backtrack", + "text_level": 1, + "bbox": [ + 171, + 832, + 517, + 848 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Table 3 further shows a confusion matrix comparing the original and think-backtrack models. The backtracking model solves 102 test problems in total with maximum test-time compute budget (4096 tokens). Out of those 102 problems, the think-backtrack model solves most of them. This evidence further shows that by training on shortened search traces, the model learns to internalize parts of its thinking without sacrificing performances.", + "bbox": [ + 169, + 853, + 826, + 926 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 20 + }, + { + "type": "image", + "img_path": "images/fc56edd9b4072e1423ef98a3d12b83f40442c91276c3388b7c61b2accbcc2e7f.jpg", + "image_caption": [ + "Figure 10: Examine different search strategies for CountDown. Beyond DFS, we experiment with Bread-First-Search (BFS) with different beam widths. We tokenize the search trace and measure the number of tokens as search length. There is not one search algorithm that is optimal to generate both short and correct solution traces." 
+ ], + "image_footnote": [], + "bbox": [ + 272, + 102, + 723, + 398 + ], + "page_idx": 21 + }, + { + "type": "table", + "img_path": "images/a5baf5c34e74588f14d8ecbd99a21e83bd733de70d8b276db19b5acc6c40b6b3.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
T-B CorrectT-B Incorrect
B Correct8319
B Incorrect4157
", + "bbox": [ + 334, + 481, + 661, + 542 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Table 3: Confusion matrix between Think-Backtrack (T-B) and Backtrack (B) models.", + "bbox": [ + 191, + 551, + 800, + 566 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G.3 Supervised learning training curve", + "text_level": 1, + "bbox": [ + 169, + 594, + 478, + 609 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "During training, we set the maximum epochs to 30 epochs and allow early stopping. All models converge before 30 epochs and we early stop training when the validation loss has converged on log-log scale. Figure 11, 12 show the training curve for both models and for CountDown and Selenium.", + "bbox": [ + 169, + 612, + 823, + 667 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "G.4 Additional GRPO plots", + "text_level": 1, + "bbox": [ + 169, + 678, + 397, + 694 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "In Figure 3 (Section 4.2.1), we used the number of mistakes as a proxy for comparing search strategies. To further demonstrate that the backtracking model fine-tuned with GRPO discovers new strategies, we repeat the same analysis in Figure 13 (right). Compared to the original backtracking model (Figure 13, left), the post-GRPO model solves many problems with a different number of mistakes than the number of mistakes made by DFS. This shift indicates that the model is no longer tightly aligned with the original search trace and has discovered alternative, more diverse solution paths. Figure 6 (left) quantifies the above qualitative observation.", + "bbox": [ + 169, + 696, + 826, + 809 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "H Data sample", + "text_level": 1, + "bbox": [ + 171, + 821, + 326, + 840 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Figure 14 shows an example of a CountDown game and the training data. 
Figure 15 shows an example of a Sudoku game and the training data. Figure 16 shows an example of stacked-CountDown variation and the training data.", + "bbox": [ + 169, + 845, + 825, + 888 + ], + "page_idx": 21 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 171, + 32, + 517, + 47 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 21 + }, + { + "type": "image", + "img_path": "images/70e27d83371e2162ae2ad3312e8604d72f509e110ba9bf9f581d4ee15f31c6b6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 271, + 165, + 504, + 305 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/a7f0cb936326cfeac7c67d4002567f49d6d2af471738d349c7d0ed30b32045bb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 504, + 166, + 727, + 305 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/8eb41fd414170e86a8c76ead06fd22df26d60ebd1999e2cf7e9f99d3edb8492a.jpg", + "image_caption": [ + "Figure 11: CountDown validation loss. Left: Backtracking model. Right: Direct solution model." + ], + "image_footnote": [], + "bbox": [ + 271, + 347, + 500, + 486 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/ae64856c00c49718b08575467901ac63bde0a1130a794f68a3fc2292876c436c.jpg", + "image_caption": [ + "Figure 12: Sodomu validation loss. Left: Backtracking model. Right: Direct solution model." + ], + "image_footnote": [], + "bbox": [ + 504, + 347, + 725, + 486 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/926acdc15b5f62a9030e2b341cc566625d663dc62c206a1cfe0ddd4f77598eea.jpg", + "image_caption": [ + "Figure 13: Backtracking model can discover some new search strategies. Post GRPO, the backtracking model discover new strategies: In the right panel, for each problem, the post-GRPO model makes a different number of mistakes compared to DFS trace." 
+ ], + "image_footnote": [], + "bbox": [ + 238, + 647, + 493, + 801 + ], + "page_idx": 22 + }, + { + "type": "image", + "img_path": "images/e36fe3888388f8faacc648f6647e91131e8f15d326a7c7ed9d7a8e014287a021.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 501, + 647, + 756, + 801 + ], + "page_idx": 22 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Example of a CountDown game", + "text_level": 1, + "bbox": [ + 197, + 135, + 437, + 152 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Target: 16 Candidates: [96, 11, 78, 22]", + "bbox": [ + 196, + 162, + 558, + 178 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "DFS Search Trace", + "text_level": 1, + "bbox": [ + 197, + 196, + 331, + 209 + ], + "page_idx": 23 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Current State: 16:[96, 11, 78, 22], Operations: [ ] \nExploring Operation: 96-78=18, Resulting Numbers: [11, 22, 18] \nGenerated Node #0,0: 16:[11, 22, 18] Operation: 96-78=18 \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 22-11=11, Resulting Numbers: [18, 11] \nGenerated Node #0,0,0: 16:[18, 11] Operation: 22-11=11 \nMoving to Node #0,0,0 \nCurrent State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] \nExploring Operation: 18-11=7, Resulting Numbers: [7] \n7,16 unequal: No Solution \nMoving to Node #0,0,0 \nCurrent State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] \nExploring Operation: 18+11=29, Resulting Numbers: [29] \n29,16 unequal: No Solution \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 18-11=7, Resulting Numbers: [22, 7] \nGenerated Node #0,0,1: 16:[22, 7] 
Operation: 18-11=7 \nMoving to Node #0,0,1 \nCurrent State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] \nExploring Operation: 22-7=15, Resulting Numbers: [15] \n15,16 unequal: No Solution \nMoving to Node #0,0,1 \nCurrent State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] \nExploring Operation: 22+7=29, Resulting Numbers: [29] \n29,16 unequal: No Solution \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 22/11=2, Resulting Numbers: [18, 2] \nGenerated Node #0,0,2: 16:[18, 2] Operation: 22/11=2 \nMoving to Node #0,0,2 \nCurrent State: 16:[18, 2], Operations: ['96-78=18', '22/11=2'] \nExploring Operation: 18-2=16, Resulting Numbers: [16] \n16,16 equal: Goal Reached", + "guess_lang": "txt", + "bbox": [ + 225, + 220, + 699, + 660 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Direct Solution Trace", + "text_level": 1, + "bbox": [ + 197, + 680, + 359, + 694 + ], + "page_idx": 23 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Current State: 16:[96, 11, 78, 22], Operations: [ ] \nExploring Operation: 96-78=18, Resulting Numbers: [11, 22, 18] \nGenerated Node #2: [11, 22, 18] from Operation: 96-78=18 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 22/11=2, Resulting Numbers: [18, 2] \nGenerated Node #3: [18, 2] from Operation: 22/11=2 \nCurrent State: 16:[18, 2], Operations: ['96-78=18', '22/11=2'] \nExploring Operation: 18-2=16, Resulting Numbers: [16] \n16,16 equal: Goal Reached", + "guess_lang": "txt", + "bbox": [ + 225, + 703, + 684, + 816 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Figure 14: Example of training data for CoutnDown. Top: game setup. Middle: full DFS search trace for training backtracking model. 
Bottom: correct solution path for training direct solution model.", + "bbox": [ + 169, + 845, + 826, + 887 + ], + "page_idx": 23 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 23 + }, + { + "type": "table", + "img_path": "images/7e900a1a3702d874176ede358ecb138cc0af4e0c40a03c1f9b5a345fc93aee4a.jpg", + "table_caption": [ + "Initial Board" + ], + "table_footnote": [], + "table_body": "
7245
17
839
51
297
32
84
4789
6213
", + "bbox": [ + 243, + 191, + 493, + 372 + ], + "page_idx": 24 + }, + { + "type": "table", + "img_path": "images/b78f6358bb8f2f1fa9c15f4102fea440d9c5a986e0710809e9168c38b2874448.jpg", + "table_caption": [ + "Solved Board" + ], + "table_footnote": [], + "table_body": "
712498635
964135278
583726194
876954312
239861457
451372986
328549761
147683529
695217843
", + "bbox": [ + 503, + 191, + 754, + 372 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Example of a Sudoku game", + "text_level": 1, + "bbox": [ + 197, + 388, + 403, + 402 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "START $(0, 0) = 7$ $(0, 2) = 2$ $(0, 3) = 4$ $(0, 8) = 5$ $(1, 3) = 1$ $(1, 7) = 7$ $(2, 1) = 8$ $(2, 2) = 3$ $(2, 7) = 9$ $(3, 4) = 5$ $(3, 7) = 1$ $(4, 0) = 2$ $(4, 2) = 9$ $(4, 8) = 7$ $(5, 3) = 3$ $(5, 5) = 2$ $(6, 2) = 8$ $(6, 4) = 4$ $(7, 1) = 4$ $(7, 2) = 7$ $(7, 4) = 8$ $(7, 8) = 9$ $(8, 0) = 6$ $(8, 3) = 2$ $(8, 4) = 1$ $(8, 8) = 3$ solving", + "bbox": [ + 196, + 414, + 800, + 467 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "DFS Search Trace", + "text_level": 1, + "bbox": [ + 197, + 484, + 330, + 497 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "SOL_START \\((4, 4) = 6\\) \\((8, 2) = 5\\) \\((4, 3) = 8\\) \\((8, 1) = 9\\) \\((8, 5) = 7\\) GUESS: \\((0, 1)\\) \\([1, 6] = 1\\) \\((0, 1) = 1\\) GUESS: \\((0, 4)\\) \\([3, 9] = 3\\) \\((0, 4) = 3\\) GUESS: \\((0, 6)\\) \\([6, 8] = 6\\) \\((0, 6) = 6\\) \\((0, 7) = 8\\) \\((8, 7) = 4\\) \\((0, 5) = 9\\) \\((1, 4) = 2\\) \\((1, 8) = 4\\) \\((2, 4) = 7\\) \\((3, 5) = 1\\) \\((4, 5) = 9\\) \\((8, 6) = 8\\) \\((1, 2) = 6\\) \\((1, 6) = 3\\) \\((3, 3) = 7\\) \\((1, 1) = 5\\) \\((1, 5) = 8\\) \\((2, 0) = 4\\) \\((4, 1) = 3\\) \\((4, 7) = 5\\) \\((5, 6) = 4\\) \\((5, 7) = 6\\) \\((5, 8) = 8\\) \\((6, 1) = 2\\) \\((7, 7) = 2\\) \\((1, 0) = 9\\) \\((3, 0) = 8\\) \\((3, 1) = 6\\) \\((3, 8) = 2\\) \\((5, 1) = 7\\) \\((5, 2) = 1\\) \\((2, 8) = 1\\) \\((3, 6) = 9\\) \\((5, 0) = 5\\) \\((6, 8) = 6\\) \\((2, 6) = 2\\) NO_CANDIDATE: \\((3, 2)\\) NO CANDIDATE: \\((0, 6)\\) REVERT: \\((0, 6)\\) \\([6, 8] =\\) NONE GUESS: \\((0, 6)\\) \\([6, 8] = 8\\) \\((0, 6) = 8\\) \\((0, 7) = 6\\) \\((8, 6) = 4\\) \\((8, 7) = 8\\) \\((0, 5) = 9\\) \\((1, 4) = 2\\) \\((1, 6) = 3\\) \\((1, 8) = 4\\) \\((2, 4) = 7\\) \\((3, 5) =\\) \\(4\\) \\((4, 5) = 1\\) \\((4, 6) = 5\\) 
\\((5, 4) = 9\\) \\((5, 6) = 6\\) \\((5, 7) = 4\\) \\((5, 8) = 8\\) \\((1, 2) = 6\\) \\((3, 3) = 7\\) \\((3, 8) = 2\\) \\((4,\\) \\(1) = 3\\) \\((5,2) = 1\\) \\((6,1) = 2\\) \\((6,7) = -2\\) \\((1,1) = -5\\) \\((1,5) = -8\\) \\((2,0) = -4\\) \\((2,8) = -1\\) \\((3,0)=\\( \\(8(3,1) = -6(3,6) = -7(7,0)=3(6,0)=1\\text{NO\\_CANDIDATE}:(3,2)\\text{NO\\_CANDIDATE}:(0,6)\\text{revert}:(0,6)[6,(8)]=\\)\\(\\text{NO\\_CANDIDATE NO\\_CANDIDATE}:(0,4)\\text{REVERT}:(0,4)[3,9]=\\text{NONE GUESS}:(0,4)[3,9]=9(0,4)=9(5,4)=7(2,4)=2(3,3)=9(3,5)=4(4,5)=1(1,4)=3(3,2)=6(5,1)=5(1,1)=6(1,2)=4(2,\\)\\)\\(\\begin{array}{r}\\emptyset=5(2,5)=6(4,1)=3(5,2)=1(6,1)=2(0,5)=8(1,\\emptyset)=9(1,5)=5(2,3)=7(3,\\emptyset)=8\\\\(3,\\text{1})=7(3,\\text{8})=2(5,\\emptyset)=4(7,\\text{5})=3(1,\\text{8})=8(3,\\text{6})=3(5,\\text{8})=6(6,\\text{5})=9(6,\\text{8})=1(7,\\emptyset)=\\\\1(0,\\emptyset)=6(0,\\emptyset)=3(1,\\emptyset)=2(2,\\emptyset)=4(5,\\emptyset)=8(6,\\emptyset)=3(7,\\emptyset)=5(8,\\emptyset)=4(2,\\emptyset)=1(4,\\emptyset)\\end{array}\\) \\(= (3,1) = -7(3,\\text{8}) = -6(4,\\text{7}) = -3(1,\\text{6}) = -2(2,\\text{8}) = -4(5,\\text{7}) = -8(6,\\text{0}) = -3(7,\\text{7}) = -2(8,\\text{6}) = -8(6,\\text{3}) = -5\\mathrm{SOL}\\quad\\mathrm{END}\\)", + "bbox": [ + 196, + 507, + 800, + 681 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Correct Solution", + "text_level": 1, + "bbox": [ + 197, + 700, + 321, + 713 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "$\\begin{array}{l}\\mathrm{SOL\\_START(4,4) = 6(8,2) = 5(4,3) = 8(8,1) = 9(8,5) = 7(0,1) = 1(0,4) = 9(5,4) = 7(2,4) =}\\\\ \\mathrm{2(3,3) = 9(3,5) = 4(4,5) = 1(1,4) = 3(3,2) = 6(5,1) = 5(1,1) = 6(1,2) = 4(2,0) = 5(2,5)}\\\\ \\mathrm{= 6(4,1) = 3(5,2) = 1(6,1) = 2(0,5) = 8(1,0) = 9(1,5) = 5(2,3) = 7(3,0) = 8(3,1) = 7(3,}\\\\ \\mathrm{8) = 2(5,0) = 4(7,5) = 3(1,8) = 8(3,6) = 3(5,8) = 6(6,5) = 9(6,8) = 1(7,0) = 1(0,6) = 6}\\\\ (\\emptyset ,7) = 3(1,6) = 2(2,8) = 4(5,7) = 8(6,\\emptyset) = 3(7,6) = 5(8,7) = 
4(2,6) = 1(4,6) = 4(4,7) = \\\\ \\mathrm{5(5,6)} = 9(6,6) = 7(6,7) = 6(7,3) = 6(7,7) = 2(8,6) = 8(6,3) = 5\\mathrm{SOL\\_END} \\end{array}$", + "bbox": [ + 196, + 722, + 800, + 786 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Figure 15: Example of training data for Sudoku game. Top: initial puzzle setup. Middle: full search trace with guesses and backtracking (tabs used). Bottom: final correct solution.", + "bbox": [ + 169, + 813, + 826, + 842 + ], + "page_idx": 24 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Example of a stacked-CountDown game", + "text_level": 1, + "bbox": [ + 197, + 104, + 500, + 119 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Target: 96 Candidates: [22, 77, 24, 48, 31, 12, 36, 35]", + "bbox": [ + 197, + 130, + 687, + 146 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "DFS Search Trace", + "text_level": 1, + "bbox": [ + 197, + 164, + 330, + 178 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: $77 - 48 = 29$ ,Resulting Numbers: [22, 24, 29, 31, 12, 36, 35] Generated Node #0,0:96:[22, 24, 29, 31, 12, 36, 35]Operation: $77 - 48 = 29$ Moving to Node $\\# 0,0$ Current State:96:[22, 24, 29, 31, 12, 36, 35],Operations:['77-48=29'] Exploring Operation: $22 + 24 = 46$ ,Resulting Numbers: [29, 46, 31, 12, 36, 35] Generated Node $\\# 0,0,0$ :96:[29,46,31,12,36,35]Operation: $22 + 24 = 46$ Moving to Node $\\# 0,0,0$ Current State:96:[29,46,31,12,36,35],Operations:'77-48=29', '22+24=46' ] Exploring Operation: $46 - 29 = 17$ ,Resulting Numbers: [17,31,12,36,35] 17,31 unequal Moving to Node $\\# 0,0$ ... . . . 
Current State:96:[29,2,31,12,36,35],Operations:'77-48=29', '24-22=2' ] Exploring Operation: $29 + 2 = 31$ ,Resulting Numbers: [31,31,12,36,35] 31,31 equal Current State:96:[31,12,36,35],Operations:[] Exploring Operation: $36 - 35 = 1$ ,Resulting Numbers: [31,12,1] Generated Node $\\# 0,0$ :96:[31,12,1]Operation: $36 - 35 = 1$ Moving to Node $\\# 0,0$ Current State:96:[31,12,1],Operations:'36-35=1' Exploring Operation: $31 + 1 = 32$ ,Resulting Numbers: [12,32] Generated Node $\\# 0,0,0$ :96:[12,32]Operation: $31 + 1 = 32$ Moving to Node $\\# 0,0,0$ Current State:96:[12,32],Operations:' $36 - 35 = 1$ ,' $31 + 1 = 32$ ' Exploring Operation: $12 + 32 = 44$ ,Resulting Numbers: [44] 44.96 unequal:No Solution ... Exploring Operation: $4*24 = 96$ ,Resulting Numbers:[96]", + "guess_lang": "yaml", + "bbox": [ + 173, + 186, + 823, + 592 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Direct Solution Trace", + "text_level": 1, + "bbox": [ + 197, + 609, + 357, + 625 + ], + "page_idx": 25 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: 77-22=55, Resulting Numbers: [55, 24, 48, 31, 12, 36, 35] Generated Node #2: [55, 24, 48, 31, 12, 36, 35] from Operation: 77-22=55 Current State: 96:[55, 24, 48, 31, 12, 36, 35], Operations: ['77-22=55'] Exploring Operation: 48-24=24, Resulting Numbers: [55, 24, 31, 12, 36, 35] Generated Node #3: [55, 24, 31, 12, 36, 35] from Operation: 48-24=24 Current State: 96:[55, 24, 31, 12, 36, 35], Operations: ['77-22=55', '48-24=24'] Exploring Operation: 55-24=31, Resulting Numbers: [31, 31, 12, 36, 35] 31,31 equal Current State: 96:[31, 12, 36, 35], Operations: [] Exploring Operation: 35-31=4, Resulting Numbers: [4, 12, 36] Generated Node #2: [4, 12, 36] from Operation: 35-31=4 Current State: 96:[4, 12, 36], Operations: ['35-31=4'] Exploring Operation: 36-12=24, Resulting Numbers: [24, 4] Generated Node #3: [24, 4] 
from Operation: 36-12=24 Current State: 96:[24, 4], Operations: ['35-31=4', '36-12=24'] Exploring Operation: $4 \\times 24 = 96$ ,Resulting Numbers:[96] 96,96 equal: Goal Reached", + "guess_lang": "txt", + "bbox": [ + 173, + 632, + 818, + 863 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Figure 16: Example of training data for stacked-CoutnDown (Appendix F). Top: game setup. Middle: full DFS search trace for training backtracking model. Bottom: correct solution path for training direct solution model.", + "bbox": [ + 169, + 888, + 823, + 933 + ], + "page_idx": 25 + }, + { + "type": "header", + "text": "Published as a conference paper at COLM 2025", + "bbox": [ + 173, + 32, + 517, + 47 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 25 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_model.json b/data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_model.json new file mode 100644 index 0000000000000000000000000000000000000000..46d8f3f329230881f2733be0c5b4f4c3b2727637 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/ecb0ea5a-e806-4206-b96f-d3cf7b2ea5b5_model.json @@ -0,0 +1,4058 @@ +[ + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.1, + 0.711, + 0.143 + ], + "angle": 0, + "content": "To Backtrack or Not to Backtrack: When Sequential Search Limits Model Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.173, + 0.262, + 0.188 + ], + "angle": 0, + "content": "Tian Qin*" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.188, + 0.33, + 0.203 + ], + "angle": 0, + "content": "Harvard University" + }, + { + "type": "text", + "bbox": [ + 0.488, + 0.172, + 0.656, + 0.187 + ], + "angle": 0, + "content": "David Alvarez-Melis" + 
}, + { + "type": "text", + "bbox": [ + 0.488, + 0.187, + 0.815, + 0.203 + ], + "angle": 0, + "content": "Harvard University, Kempner Institute, MSR" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.222, + 0.288, + 0.238 + ], + "angle": 0, + "content": "Samy Jelassi†" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.238, + 0.468, + 0.253 + ], + "angle": 0, + "content": "Harvard University, Kempner Institute" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.221, + 0.617, + 0.236 + ], + "angle": 0, + "content": "Eran Malach‡†" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.237, + 0.791, + 0.253 + ], + "angle": 0, + "content": "Harvard University, Kempner Institute" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.287, + 0.54, + 0.303 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.317, + 0.77, + 0.64 + ], + "angle": 0, + "content": "Recent advancements in large language models (LLMs) have significantly improved their reasoning abilities, particularly through techniques involving search and backtracking. Backtracking naturally scales test-time compute by enabling sequential, linearized exploration via long chain-of-thought (CoT) generation. However, this is not the only strategy for scaling test time-compute: parallel sampling with best-of-\\(n\\) selection provides an alternative that generates diverse solutions simultaneously. Despite the growing adoption of sequential search, its advantages over parallel sampling—especially under a fixed compute budget—remain poorly understood. In this paper, we systematically compare these two approaches on two challenging reasoning tasks: CountDown and Sudoku. Surprisingly, we find that sequential search underperforms parallel sampling on CountDown but outperforms it on Sudoku, suggesting that backtracking is not universally beneficial. 
We identify two factors that can cause backtracking to degrade performance: (1) training on fixed search traces can lock models into suboptimal strategies, and (2) explicit CoT supervision can discourage 'implicit' (non verbalized) reasoning. Extending our analysis to reinforcement learning (RL), we show that models with backtracking capabilities benefit significantly from RL fine-tuning, while models without backtracking see limited, mixed gains. Together, these findings challenge the assumption that backtracking universally enhances LLM reasoning, instead revealing a complex interaction between task structure, training data, model scale, and learning paradigm." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.656, + 0.321, + 0.672 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.679, + 0.828, + 0.807 + ], + "angle": 0, + "content": "Recent studies (Kumar et al., 2024; Havrilla et al., 2024) propose teaching LLMs to correct mistakes through backtracking, enabling exploration of alternative solutions. Despite growing popularity (DeepSeek-AI et al., 2025; Muennighoff et al., 2025), it remains unclear whether correcting errors post-hoc via backtracking is ultimately more compute-efficient at test time than directly learning the correct solution. Solving strategic games such as CountDown and Sudo require extensive exploration of different solution paths, making them ideal for analyzing the computational trade-offs of sequential versus parallel search. In this work, we use these two games to conduct a controlled investigation to determine whether backtracking is an effective way to scale test-time compute." 
+ }, + { + "type": "text", + "bbox": [ + 0.17, + 0.816, + 0.828, + 0.889 + ], + "angle": 0, + "content": "There are two primary strategies to scale LLMs' test-time compute: sequential autoregressive search (explicit backtracking within a chain-of-thought) and parallel sampling (generating multiple independent solutions and selecting the best with best-of- \\( n \\)). While sequential search allows the model to refine reasoning by learning from past mistakes, it comes at a cost: due to the attention mechanism, the FLOPs required to generate CoT grow quadratically" + }, + { + "type": "page_footnote", + "bbox": [ + 0.19, + 0.896, + 0.651, + 0.911 + ], + "angle": 0, + "content": "*Correspondence to tqin@g.harvard.edu ‡ Equal senior contributions." + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.911, + 0.33, + 0.925 + ], + "angle": 0, + "content": "†Currently at Apple." + }, + { + "type": "list", + "bbox": [ + 0.19, + 0.896, + 0.651, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.285, + 0.059, + 0.717 + ], + "angle": 270, + "content": "arXiv:2504.07052v2 [cs.LG] 3 Oct 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.104, + 0.614, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.106, + 0.824, + 0.24 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.253, + 0.828, + 0.352 + ], + "angle": 0, + "content": "Figure 1: Backtracking performance varies significantly with task type and the application of post-training reinforcement learning. 
(A) Training backtracking and direct solution models on CountDown and Selenium reveals task-dependent performance: under equal test-time compute, backtracking (sequential search) underperforms direct solution with best-of-\\(n\\) generation (parallel search) on CountDown, but outperforms it on Selenium. (B) Fine-tuning with GRPO consistently improves backtracking model performance across compute budgets, but has mixed effects on the direct solution model." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.38, + 0.828, + 0.48 + ], + "angle": 0, + "content": "with sequence length. Even when generating the same number of tokens, sequential search incurs more FLOPs than parallel sampling. To compare these two strategies, we train (i) backtracking models that learn from explicit search traces and use sequential search to solve hard problems, and (ii) direct solution (i.e., no backtracking) models that learn solely from correct solutions, using parallel search at test time. Equating test-time compute, we observe contrasting results (Fig. 1 A): in CountDown, the backtracking model consistently underperforms, whereas in Sudo, it consistently outperforms the direct solution model." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.491, + 0.829, + 0.673 + ], + "angle": 0, + "content": "Through controlled experiments, we identify two reasons teaching backtracking can inadvertently degrade performance. First, explicit backtracking reasoning traces bias models toward prescribed search strategies, limiting exploration of potentially superior alternatives. In CountDown, the backtracking model closely mimics training search paths, while the direct solution model independently discovers more efficient strategies (Section 4.2). Second, detailed backtracking traces encourage morbidity (producing lengthy yet ineffective reasoning chains), while discouraging internal \"thinking\" (implicit reasoning without outputting CoT, Section 4.3). 
Beyond these factors, we demonstrate that model size and task-specific characteristics also impact the effectiveness of backtracking (Section 5.1). Crucially, we show that our contrastive observation between Sudoku and Countdown generalizes to real-world tasks: such as math and science problem solving. We show that backtracking is not always the most effective way to scale test-time compute (Appendix A) for general reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.684, + 0.828, + 0.798 + ], + "angle": 0, + "content": "Extending beyond supervised learning, we evaluate reinforcement learning (RL) with Group Relative Policy Optimization (GRPO) (Shao et al., 2024), uncovering novel interactions between backtracking capabilities and RL. We show that the backtracking model discovers new, effective search strategies through RL, achieving substantial performance improvements. Conversely, the direct solution model improves one-shot accuracy but loses effectiveness in parallel search, revealing a clear trade-off (Fig. 1 B). This finding shifts our understanding of how backtracking influences a model's potential to improve under RL, highlighting the unique advantage of teaching backtracking for long-term reasoning capabilities." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.809, + 0.825, + 0.839 + ], + "angle": 0, + "content": "Our controlled study on two strategic games provides a nuanced understanding of when backtracking effectively scales test-time compute. Our main contributions are:" + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.852, + 0.826, + 0.895 + ], + "angle": 0, + "content": "- We use CountDown and Selenium as controlled testbeds to examine whether backtracking enables efficient test-time scaling. Under a fixed compute budget, backtracking outperforms parallel search in Selenium but underperforms in CountDown (Fig. 1 A)." 
+ }, + { + "type": "text", + "bbox": [ + 0.176, + 0.896, + 0.826, + 0.926 + ], + "angle": 0, + "content": "- We identify two key factors affecting backtracking efficacy: (1) Prescribed search bias: Training on detailed backtracking traces can unintentionally constrain models to subopti" + }, + { + "type": "list", + "bbox": [ + 0.176, + 0.852, + 0.826, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.948, + 0.505, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.104, + 0.49, + 0.308 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.517, + 0.104, + 0.791, + 0.282 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.223, + 0.31, + 0.766, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.264, + 0.373, + 0.581, + 0.407 + ], + "angle": 0, + "content": "Direct solution model performs parallel search through temperature sampling:" + }, + { + "type": "image", + "bbox": [ + 0.285, + 0.409, + 0.625, + 0.49 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.507, + 0.825, + 0.592 + ], + "angle": 0, + "content": "Figure 2: Backtracking and direct solution for two different strategic games. Panel \\((a, b)\\): Example the search tree for CountDown and Sudoku. Solving both games require extensive search in the solution space. Panel \\((c)\\): The backtracking model is trained on the search traces generated by a Depth-First-Search (DFS) algorithm. At test time, the model performs sequential search. The direct solution model is trained on the correct solution only. At test time, the model performs parallel search through temperature sampling and takes best-of-\\(n\\)." 
+ }, + { + "type": "text", + "bbox": [ + 0.19, + 0.616, + 0.825, + 0.647 + ], + "angle": 0, + "content": "mal search strategies. (2) Excessive morbidity: Explicit backtracking traces encourage models to produce lengthy reasoning chains without improving reasoning ability." + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.647, + 0.826, + 0.69 + ], + "angle": 0, + "content": "- We demonstrate that reinforcement learning (GRPO) consistently enhances backtracking models by enabling discovery of novel solutions, whereas direct solution models experience mixed outcomes (Fig. 1 B)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.703, + 0.329, + 0.719 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.727, + 0.585, + 0.742 + ], + "angle": 0, + "content": "See Appendix B for an extensive review on related work." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.749, + 0.827, + 0.863 + ], + "angle": 0, + "content": "Scaling test-time compute. Prior work has explored scaling language model performance at test time through parallel or sequential search strategies. Parallel methods rely on independent sampling and selection via heuristics or reward models (Brown et al., 2024; Irvine et al., 2023; Levi, 2024; Xin et al., 2024), while sequential methods refine reasoning step by step using earlier outputs (Hou et al., 2025; Lee et al., 2025). Tree-based methods such as MCTS bridge the two and often incorporate process-level reward models to guide reasoning (Wu et al., 2024; Lightman et al., 2023). Our work contributes to this area by comparing sequential (backtracking) and parallel search under fixed compute budgets." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Self-correction and backtracking. 
Language models can be trained to self-correct through fine-tuning on revision data, synthetic augmentations, or reward-based learning (Saunders et al., 2022; Qu et al., 2024; Welleck et al., 2022). Some approaches also introduce explicit search or separate correction modules to guide revision (Yao et al., 2023b; Havrilla et al.," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.103, + 0.827, + 0.134 + ], + "angle": 0, + "content": "2024). We build on this line of work by studying backtracking as an implicit form of self-correction, analyzing when learning to backtrack helps or hinders reasoning." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.147, + 0.827, + 0.248 + ], + "angle": 0, + "content": "Reinforcement learning for LLM reasoning. Reinforcement learning has shown promise in enabling language models to autonomously discover reasoning strategies, including through simplified algorithms like GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025). While prior work has demonstrated strong results, it remains unclear which model properties enable successful RL-based reasoning (Zelikman et al., 2022; Kazemnejad et al., 2024). Our study addresses this gap by comparing how backtracking and no backtracking models respond to RL fine-tuning, revealing asymmetric benefits." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.264, + 0.624, + 0.283 + ], + "angle": 0, + "content": "3 Two strategic games: CountDown and Sudoku" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.292, + 0.308, + 0.306 + ], + "angle": 0, + "content": "3.1 CountDown" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.313, + 0.313, + 0.329 + ], + "angle": 0, + "content": "3.1.1 Game setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.332, + 0.828, + 0.418 + ], + "angle": 0, + "content": "The Game of CountDown has been frequently used as a testbed to study and evaluate LLM reasoning (Gandhi et al., 2024; 2025; Yao et al., 2023a). In a CountDown game, the player is given a set of candidate numbers and a target number (restricted to integers). The goal is to reach the target by applying a sequence of arithmetic operations—addition, subtraction, multiplication, or division—using the candidate numbers. Each number must be used exactly once, and intermediate results can be reused in subsequent operations." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.828, + 0.531 + ], + "angle": 0, + "content": "To algorithmically solve CountDown, we can represent the problem as a search tree (Fig. 2a). Each node in the search tree corresponds to a state defined by the current set of available numbers. At each step, the algorithm selects a pair of numbers from the set and applies one of the four operations, replacing the pair with the resulting value to create a new state. This process continues recursively until the target number is reached (correct leaf node) or all combinations are exhausted (wrong leaf node). In this work, we play the CountDown with four candidate numbers, and for each game, there are 1,152 possible search paths." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.543, + 0.345, + 0.559 + ], + "angle": 0, + "content": "3.1.2 Data generation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.562, + 0.829, + 0.704 + ], + "angle": 0, + "content": "We generate backtracking traces with Depth First Search (DFS) with a sum-heuristic (Gandhi et al. (2024), further details in Appendix C.1). We generate a dataset of 500,000 CountDown questions, and the DFS search correctly solves \\(57\\%\\) of the questions. The backtracking trace is a serialized version of DFS, listing all the tree nodes visited in the order of DFS traversal. To construct the direct solution training data, we prune the backtracking traces to keep only the correct solution path. With the pruning approach, we remove the exploratory parts of the trace while preserving the answer format and scaffolding used in the backtracking model, to ensure a fair comparison. We also ensure that the direct solution model does not see more solved CountDown games, we include only the 285,000 questions (i.e., \\(500,000 \\times 0.57\\)) that DFS successfully solves. We provide examples of both training data in Appendix H." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.717, + 0.273, + 0.731 + ], + "angle": 0, + "content": "3.2 Suku" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.738, + 0.313, + 0.754 + ], + "angle": 0, + "content": "3.2.1 Game setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.758, + 0.828, + 0.926 + ], + "angle": 0, + "content": "Sudoku is another prototypical strategic game used to study reasoning and search in LLMs (Yao et al., 2023a; Long, 2023). In this work, we focus on hard \\(9 \\times 9\\) Sudo boards, where only about 20 of the 81 cells are pre-filled, making the search space substantially larger (see Appendix C.1 for a description of Sudo rules). To algorithmically solve Sudo, we represent the problem as a search tree (Fig. 2b). 
Each node corresponds to a partial board state, where some cells have been filled. At each step, the algorithm selects an unfilled cell and fills it with a candidate digit that satisfies Sudo constraints in the current state. Each valid assignment creates a new child node representing the updated board. The process continues recursively until a complete, valid solution is reached (correct leaf node) or no valid moves remain (wrong leaf node). The depth of the tree corresponds to the number of empty cells, and the branching factor at each node depends on the number of unfilled cells as well as how many digits are valid for each unfilled cell." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.101, + 0.386, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.396, + 0.101, + 0.605, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.102, + 0.822, + 0.221 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.232, + 0.828, + 0.358 + ], + "angle": 0, + "content": "Figure 3: Backtracking and direct solution models implement different search strategies for CountDown. For test questions that model solves correctly, we measure the number of mistakes made (i.e., wrong terminal nodes visited) before finding the correct solution. We sort the test questions by number of mistakes made by DFS. Left: Trained on DFS traces, the number of mistakes made by the backtracking model correlates with the DFS. Middle: In contrast, the direct solution model solves a lot more problems with significantly fewer mistakes compared to DFS. 
Right: For a given number of mistakes made, we examine whether two models solve the same set of question as DFS. Direct solution model implements a search strategy significantly different from DFS." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.383, + 0.345, + 0.398 + ], + "angle": 0, + "content": "3.2.2 Data generation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.4, + 0.828, + 0.568 + ], + "angle": 0, + "content": "We follow the same procedure as CountDown to generate training data for both the backtracking and direct solution models. We use a DFS-based search algorithm, in combination with a Sudoku solver that applies seven common human strategies (e.g., naked singles, hidden pairs and etc, Papadimas. (2023)) to eliminate candidates for unfilled cells. At each node, we use the 7 strategies to eliminate candidates for unfilled cells, and then DFS chooses an unfilled cell, makes a guess and continues solving recursively. This process continues until the board is either solved or reaches a dead-end (i.e., an invalid state with no legal moves). We use a dataset of 3M Sudoku puzzles from (Radcliffe, 2020), and the combined DFS-solver approach successfully solves \\(98\\%\\) of them. Since DFS successfully solves nearly all puzzles, we train both models on 2.8M examples and reserve the last 200K for validation and testing. We provide further details on Sudoku training data generation in Appendix C.1 and data examples in Appendix H." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.576, + 0.36, + 0.592 + ], + "angle": 0, + "content": "3.3 Model and training" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.594, + 0.827, + 0.721 + ], + "angle": 0, + "content": "We use Qwen2.5-style model architectures (Yang et al., 2024) with RoPE positional encoding (Su et al., 2021) and Group Query Attention (GQA) (Ainslie et al., 2023). 
To maximize parameter efficiency, we design custom tokenizers for both games, significantly reducing the size of the language modeling head. This allows us to train smaller models than prior work (Gandhi et al., 2024; Shah et al., 2024) while maintaining comparable performance on both tasks. For CountDown, we use a 17M parameter model with a context length of 4096 tokens; for Selenium, we use a 38M model with the same context length. See Appendix C.3 for model architecture and an exhaustive list of training hyperparameters. We train all models until validation loss converges (see Appendix G.3)." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.733, + 0.531, + 0.751 + ], + "angle": 0, + "content": "4 Empirical trade-offs of backtracking" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.756, + 0.825, + 0.814 + ], + "angle": 0, + "content": "We first demonstrate that backtracking models do not universally outperform the direct solution models (Section 4.1) because backtracking models are restricted to learn a prescribed way of search (Section 4.2). We then identify two factors (Sections 4.3) showing how we might improve test-time scaling for backtracking models." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.822, + 0.49, + 0.837 + ], + "angle": 0, + "content": "4.1 Backtracking is not always beneficial" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Evaluation metrics. We evaluate model performances using solving accuracy on 200 unseen problems with binary scores (either correct or incorrect, no partial credits, see appendix C.1). We use FLOPs to compare inference costs (see Appendix D for FLOPs computation). For the backtracking model, we allow models to autoregressively generate and measure how many problems the model finds the correct solution at various CoT lengths (ranging from 1024 to 4096 tokens). 
For the direct solution model, we generate" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.215 + ], + "angle": 0, + "content": "\\(n\\) solutions in parallel through temperature sampling at \\(T = 0.7\\), and examine whether the model has found the correct solution within \\(n\\) attempts (i.e., best-of-\\(n\\)). Best-of-\\(n\\) is a suitable choice in those two games, a case where solving the task is hard but verification is trivial. In general, our analysis applies to tasks where verification can be easily done with an external verifier at test-time. This is definitely not always the case, and we leave the study of problems where test-time verification is not as easy to future work. In those tasks, one might need to consider majority voting or other strategies. See Appendix E for further discussions." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.223, + 0.828, + 0.42 + ], + "angle": 0, + "content": "Results. In Fig. 1 A, we observe distinct scaling behaviors for the two models. For both games, the direct solution model's test accuracy scales linearly with increased test-time compute (measured on a logarithmic scale). This scaling behavior indicates that through parallel sampling, the backtracking model generates diverse solutions that search through different solution paths. Conversely, the backtracking model exhibits sub-linear scaling: Longer solution traces disproportionately yield smaller accuracy improvements. We attribute the sub-linear scaling to two causes. First, as reasoning chains become longer, the backtracking model might struggle to effectively track visited states and efficiently search through the solution space. 
Second, when models perform sequential search, the computation cost grows quadratically with CoT length (due to the attention mechanism, see Appendix D), and this further makes backtracking model less effective for scaling up test time compute. Overall, for CountDown, the direct solution model consistently outperforms its backtracking counterpart. However, this trend is reversed in Selenium, where the backtracking model consistently achieves higher accuracy." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.429, + 0.611, + 0.443 + ], + "angle": 0, + "content": "4.2 Backtracking model learns both the good and the bad" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.447, + 0.828, + 0.617 + ], + "angle": 0, + "content": "When teaching a child to correct math mistakes, the child understands that the goal is the correct answer—not making and then fixing errors. Humans have meta-cognitive awareness that models lack. Models trained via next-token prediction simply imitate the traces they see, including making the mistake before fixing it. In CountDown, this poses a key limitation: the backtracking model learns to follow the specific search paths seen in training. While some tasks—like shortest path finding—have optimal strategies we can supervise directly (e.g., Dijkstra's algorithm), most reasoning tasks, including CountDown, lack such guarantees. As a result, the model may be constrained by the inefficiencies in the backtracking data. In contrast, the direct solution model, trained only on correct answers, is free to discover more efficient strategies. In our subsequent analysis, we concretely show how the direct solution model successfully bypasses many inefficient search and backtracking steps learned by the backtracking model." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.622, + 0.655, + 0.637 + ], + "angle": 0, + "content": "4.2.1 Backtracking model finds the solution with fewer mistakes" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.638, + 0.825, + 0.711 + ], + "angle": 0, + "content": "Measuring number of mistakes. We compare the number of mistakes made by: (1) DFS (used to generate backtracking data), (2) the backtracking model, and (3) the direct solution model. For DFS and the backtracking model, mistakes are counted as the number of incorrect terminal nodes explored before finding the correct solution. For the direct solution model, mistakes correspond to how many parallel samples (\\(n\\) in best-of-\\(n\\)) are needed." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.719, + 0.827, + 0.902 + ], + "angle": 0, + "content": "Comparing search strategies. We sort the 200 test problems based on mistakes made by DFS and plot mistakes for both models. Fig. 3 left compares DFS search and backtracking model. The number mistakes made by the backtracking model is correlated with the DFS backtracking trace. This observation is not surprising given that the backtracking model is trained on these traces. However, this result is interesting when we compare it against the direct solution model (Fig. 3 middle). The direct solution model solves most problems within fewer than 10 attempts—far fewer compared to DFS or the backtracking model. Fig. 3 right quantifies these observations. Specifically, for a fixed mistake budget, we use Jaccard Index to measure whether the model solves a similar set of problems as DFS solves. The backtracking model closely mirrors DFS search (high set similarity), whereas the direct solution model diverges significantly (low set similarity). Together with superior performance of the direct solution model, we conclude that the direct solution model learns more efficient search strategies, avoiding unnecessary explorations of wrong paths." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.91, + 0.66, + 0.925 + ], + "angle": 0, + "content": "1 Mistakes are counted only for problems solved correctly by the model." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.106, + 0.538, + 0.309 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.108, + 0.828, + 0.306 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.322, + 0.828, + 0.396 + ], + "angle": 0, + "content": "Figure 4: Two different variations to improve backtracking model. (a). We hypothesize that the backtracking model can think one step ahead without sacrificing its ability to search. Therefore, we shorten the search trace by skipping the last search step. (b). Two data variations that improve the backtracking model. Mixed-backtrack model trained on a diverse set of search strategies. Think-backtracking model trained on shortened DFS trace." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.426, + 0.527, + 0.442 + ], + "angle": 0, + "content": "4.3 Two ways to improve backtracking model" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.447, + 0.828, + 0.56 + ], + "angle": 0, + "content": "Training on diverse set of search strategies. Our analysis suggests a clear direction for improving the backtracking model: using better search strategies to improve backtracking traces. Beyond DFS, we explored alternatives including Breadth-First Search (BFS) and various heuristic methods (see Appendix G.1). Despite these efforts, no single search strategy significantly outperformed DFS. Inspired by Gandhi et al. 
(2024), we trained a variant of the backtracking model—mix-backtrack model—using a diverse mixture of BFS and DFS strategies (32 in total), aiming to help the model discover more optimal search patterns." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.573, + 0.829, + 0.687 + ], + "angle": 0, + "content": "Backtracking model thinks less and talks more. Apart from learning suboptimal search strategies, another inefficiency in the backtracking model is caused by the model learns to be excessively verbose. Specifically, by requiring the model to explicitly output every step of the DFS, we may prevent it from internalizing part of the reasoning process. Concretely, we hypothesize that for CountDown, the model can internally plan at least one step ahead, allowing it to shorten its explicit reasoning trace without losing its ability to perform DFS. To test hypothesis, we train a variation—the think-backtrack model—on shortened DFS traces, skipping one intermediate step (Fig. 4, A)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.7, + 0.829, + 0.8 + ], + "angle": 0, + "content": "Mix-strategy results. Fig. 4 (B) compares this mixed-strategy model against the original backtracking and direct solution models. We also include a training data upper bound, representing perfect execution of the mixed search strategies. The mixed-strategy model improves over the original backtracking model and closely approaches its training-data upper bound. However, even with deliberate attempts to optimize search strategies, surpassing the performance of the direct solution model remains challenging. This experiment underscores the inherent difficulty in identifying superior handcrafted search traces." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.812, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Think-backtrack results. Fig. 4 (B) also compares the performance of the think-backtrack model. 
By encouraging the model to internalize parts of the reasoning process, the think-backtrack model achieves performances comparable to the direct solution model. This result suggests that models with backtracking ability might produce long but ineffective CoT. By training the model to avoid making the mistakes at the first place, we reduce model morbidity without sacrificing its search capability, and in turn improving test-time-compute scaling. As an additional evidence, in Appendix G.2, we show that the think-backtrack model solves a superset of test problems solved by the original backtrack model." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.175, + 0.104, + 0.825, + 0.482 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.169, + 0.495, + 0.829, + 0.568 + ], + "angle": 0, + "content": "Figure 5: Different scaling behaviors for backtracking versus direct solution model. CountDown (A). Backtracking model performance does not improve as we scale up model size. (B). The direct solution model improves (C). Direct solution model consistently outperforms backtracking model. Sodomu (D, E). Both models' performances improve as we scale up model size. (F). Direct solution model consistently underperforms backtracking model." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.593, + 0.757, + 0.612 + ], + "angle": 0, + "content": "5 Model size and tree depth impact the efficacy of backtracking" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.618, + 0.828, + 0.704 + ], + "angle": 0, + "content": "While we've shown that backtracking might lead to ineffective test-time scaling, other factors also shape its effectiveness. 
In Section 5.1, we show that backtracking and direct solution models scale differently with model sizes. To explain the contrasting outcomes (Fig. 1 A) between CountDown and Selenium, in Appendix F, we show that task differences—particularly search tree depth—play a key role: deeper tasks like Selenium benefit more from backtracking." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.714, + 0.416, + 0.729 + ], + "angle": 0, + "content": "5.1 Dependence on model size" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.733, + 0.828, + 0.791 + ], + "angle": 0, + "content": "We now investigate how model size impacts the performance of backtracking and direct solution models. We evaluate four model scales—3M, 17M, 38M, and 144M—by proportionally increasing the number of attention heads, embedding dimensions, and number of attention layers. Detailed model configurations can be found in Appendix C.2." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.799, + 0.827, + 0.926 + ], + "angle": 0, + "content": "CountDown. Scaling up model size improves the performance of the direct solution model (Fig. 5 B) across all test-time-compute budgets. When trained exclusively on correct solutions, larger models can independently discover highly effective search strategies. In contrast, the backtracking model shows no improvements with increased model sizes (Fig. 5 A). The lack of improvement from model scaling can be explained by training data: The performance of backtracking model is constrained by the quality of the backtracking traces used for training. As previously seen in Fig. 4 (right), the 17M backtracking model is already approaching the performance ceiling that is set by the training data. Training larger models on the same backtracking data would not lead to further performance improvements. 
Due" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "to different scaling behaviors between backtracking and direct solution models, the gap in performances between two types of models widens with increasing model sizes (Fig. 5 C)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.141, + 0.828, + 0.296 + ], + "angle": 0, + "content": "Sudoku. Similar to CountDown, the performances of direct solution models improve with increased model sizes (Fig. 5 E). Unlike CountDown, however, the backtracking model also significantly benefits from scaling (Fig. 5 D). This difference can again be explained by examining the backtracking training data. Sodomu is inherently more complex than CountDown. The DFS backtracking traces successfully solve \\(97\\%\\) of test boards—far exceeding the current performance of all four tested model sizes. Because the backtracking model for Sodomu has not yet reached training data performance ceiling, increased model capacity leads to improved results. On the other hand, due to the complexity and large search space of the game, the backtracking models' performance gains start to diminish as the search traces become longer. As a result, the backtracking model consistently outperforms the direct solution model across scales, but the advantages diminishes at larger compute budgets (Fig. 5 E)." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.309, + 0.603, + 0.328 + ], + "angle": 0, + "content": "6 GRPO: Learning beyond the imitation game" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.334, + 0.829, + 0.379 + ], + "angle": 0, + "content": "So far, we have shown that under supervised learning, backtracking is not always optimal for scaling test-time compute. We now explore how further training both backtracking and direct solution models with reinforcement learning leads to qualitatively different outcomes." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.387, + 0.498, + 0.403 + ], + "angle": 0, + "content": "6.1 Continue training models with GRPO" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.406, + 0.827, + 0.547 + ], + "angle": 0, + "content": "Recently, RL has become a popular approach to further enhance LLMs performance on challenging benchmarks such as MATH (Hendrycks et al., 2021) and AIME (AIME, 2024). Here, we study the effects of RL in a controlled setting, focusing on how it impacts a model's backtracking behaviors (sequential search) and as well as a model's parallel search capability (sampling with best-of- \\( n \\)). We take the CountDown backtracking and direct solution models, which have been trained to convergence under the supervised learning objective (see Appendix G.3 for training curves). We then continue training each model using GRPO (Shao et al., 2024), following verl's (Sheng et al., 2024) implementation. We perform GRPO on the same training data used for the supervised learning. As before, we evaluate performance across different test-time compute budgets." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.557, + 0.599, + 0.572 + ], + "angle": 0, + "content": "6.2 Backtracking model discovers new search strategies" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.575, + 0.828, + 0.675 + ], + "angle": 0, + "content": "Figure 1 C shows that the backtracking model post GRPO sees an performance boost across all test-compute budgets. The post-GRPO model (dark red) reaches an accuracy comparable to the pre-GRPO direct solution model (light blue). This improvement is surprising for two reasons: (1) at maximum compute (4096 tokens), the model solves nearly \\(70\\%\\) of the test set—exceeding the performance of the DFS strategy used to generate training data \\((57\\%)\\); and (2) the model was trained on questions it has already seen during supervised learning, with no new problems introduced during GRPO." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.685, + 0.828, + 0.827 + ], + "angle": 0, + "content": "These gains suggest that the backtracking model, once freed from the constraints of predicting next token on DFS traces, can now discover better search strategies. To concretely show that the backtracking model post-GRPO learns search strategies different from DFS training traces, we revisit the mistake-counting analysis from Section 4.2.1 (Figure 3). For each test problem, we compute the number of mistakes as before (i.e., counting how many incorrect terminal nodes are explored before reaching a correct solution). Using the same set similarity measure as before, we quantify the strategy deviation in Figure 6 (left). The smaller Jaccard index values confirm that through GRPO, the backtracking model has learned new and more effective search behaviors. In Appendix G.4, we also show the per-problem scatter plot as done in Figure 3." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.835, + 0.536, + 0.851 + ], + "angle": 0, + "content": "6.3 Direct solution model specializes at pass@1" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.854, + 0.826, + 0.927 + ], + "angle": 0, + "content": "We now show that compared to backtracking models, GRPO has remarkably different effects on direct solution models. As shown in Figure 1 C, the direct solution model post-GRPO achieves strong performance at the smallest compute budget (pass@1), solving \\(42.5\\%\\) of unseen CountDown puzzles (82 out of 200 test problems). None of the handcrafted search strategies (Appendix G.1) can reach such high accuracy. To understand the impressive gain" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.179, + 0.101, + 0.511, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.547, + 0.102, + 0.82, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.277, + 0.825, + 0.337 + ], + "angle": 0, + "content": "Figure 6: GRPO has different effect on backtracking versus direct solution model Left: After GRPO, the backtracking model's search strategy starts to deviate away from the DFS search. Right: For problems the pre-GRPO direct solution model (blue) have a non-zero pass@k solving probabilities, the post-GRPO direct solution model (red) solves with pass@1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.361, + 0.827, + 0.462 + ], + "angle": 0, + "content": "on 1-shot performance, we examine those 82 problems, and discover that the pre-GRPO direct solution model was able to find correct solution by sampling best-of-\\(n\\) (with \\(n \\leq 64\\)). 
We now examine a model's solving probabilities (i.e., measuring pass@k rate out of the 64 generations). We compare the pass@k rate for the diret solution model pre and post GRPO, shown in Figure 6, right. We rank the 200 test problems by the pre-GRPO model's solving probabilities. For problems that the pre-GRPO model has a non-zero pass@k rate, the post-GRPO model can solve most of them with pass@1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.825, + 0.557 + ], + "angle": 0, + "content": "However, this improvement in 1-shot performance comes with a substantial trade-off: the model loses its ability to generate diverse solutions. As a result, when we perform parallel search using best-of-\\(n\\), the direct solution model post-GRPO fail to explore different solution paths, hurting its test-time-scaling effectiveness. Therefore, test-time compute scaling becomes ineffective as we increase compute budgets, forming a sharp contrast to the backtracking model's consistent improvements across the full compute budget." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.568, + 0.455, + 0.584 + ], + "angle": 0, + "content": "7 Conclusion and discussions" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.591, + 0.827, + 0.718 + ], + "angle": 0, + "content": "In this work, we conducted a controlled empirical investigation into the efficacy of teaching backtracking to large language models (LLMs) as a method for scaling test-time computation. Using two strategic games, CountDown and Sudoku, we demonstrated that backtracking does not universally outperform parallel solution strategies; rather, its effectiveness depends significantly on task characteristics, model scale, and training approach. Appendix A, we show that our results in synthetic setting generalize: even in real-world reasoning tasks, backtracking is not always beneficial. 
Additionally, our reinforcement learning experiments uncovered a unique synergy between backtracking capabilities and RL-based training, enabling models to discover novel strategies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.725, + 0.827, + 0.88 + ], + "angle": 0, + "content": "Limitations and future work. While our experiments relied on two strategic games (Count-Down and Sudo) and models trained from scratch—common practices for controlled studies—an important avenue for future research is extending our findings to complex, real-world reasoning tasks such as coding and mathematical problem-solving. For future work, developing precise ways to characterize tasks that benefit from backtracking will be valuable for guiding model training. Finally, while we intentionally created a dichotomy between pure backtracking and direct-solution models, real-world applications may require hybrid strategies that dynamically choose between direct generation and explicit search based on problem complexity. Investigating whether LLMs can autonomously optimize their reasoning modes, particularly through reinforcement learning paradigms, is a promising future direction for improving the flexibility and efficiency of model reasoning." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.347, + 0.12 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.126, + 0.828, + 0.185 + ], + "angle": 0, + "content": "We thank Core Francisco Park and Bingbin Liu for helpful discussions and feedback throughout the development of this work. 
TQ and DAM acknowledge support from the Kempner Institute, the Aramont Fellowship Fund, and the FAS Dean's Competitive Fund for Promising Scholarship." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.197, + 0.276, + 0.213 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.224, + 0.826, + 0.253 + ], + "angle": 0, + "content": "AIME. American invitational mathematics examination, 2024. URL https://maa.org/math-competitions/american-invitational-mathematics-examination-aime." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.263, + 0.827, + 0.306 + ], + "angle": 0, + "content": "Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebrón, and Sumit Sanghai. GQA: Training generalized multi-query transformer models from multi-head checkpoints. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.13245." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.314, + 0.827, + 0.37 + ], + "angle": 0, + "content": "Afra Feyza Akyurek, Ekin Akyurek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. RL4F: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.08844." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.38, + 0.827, + 0.423 + ], + "angle": 0, + "content": "Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan D Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2408.11791." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.432, + 0.826, + 0.476 + ], + "angle": 0, + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.21787." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.484, + 0.827, + 0.527 + ], + "angle": 0, + "content": "Sehyun Choi, Tianqing Fang, Zhaowei Wang, and Yangqiu Song. KCTS: Knowledge-constrained tree search decoding with token-level hallucination detection. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2310.09044." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.535, + 0.827, + 0.925 + ], + "angle": 0, + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z F Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J L Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R J Chen, R L Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S S Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W L Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X Q Li, Xiangyue Jin, Xiaojin 
Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y K Li, Y Q Wang, Y X Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong. Ying He,Yishi Piao,Yisong Wang,Yixuan Tan,Yiyang Ma,Yiyuan Liu,Yongqiang Guo. Yuan OuYuduan Wang Yue Gong.Yuheng ZouYujia HeYunfan XiongYuxiang Luo. Yuxiang YouYuxuan LiuYuyang ZhouYX ZhuYanhong Xu,Yanping Huang,Yaohui Li,Yi Zheng,Yuchen Zhu,Yunxian Ma,Ying Tang,Yukun Zha,Yuting YanZ Z Ren Zehui Ren,Zhangli Sha,Zhe Fu,Zhean XuZhenda XieZhengyan Zhang,Zhenwen Hao. Zhicheng Ma,Zhigang Yan,Zhiyu WuZihui GuZijia ZhuZijun LiuZilin LiZiwei Xie" + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.224, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang, and Zhen Zhang. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.155, + 0.827, + 0.199 + ], + "angle": 0, + "content": "Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D Goodman. Stream of search (SoS): Learning to search in language. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2404.03683." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.207, + 0.827, + 0.251 + ], + "angle": 0, + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. 
Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective STaRs. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2503.01307." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.259, + 0.827, + 0.316 + ], + "angle": 0, + "content": "Alex Havrilla, Sharath Raparthy, Christoforus Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLoRe: When, where, and how to improve LLM reasoning via global and local refinements. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.10963." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.324, + 0.827, + 0.368 + ], + "angle": 0, + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. arXiv [cs.LG], 2021. URL http://arxiv.org/abs/2103.03874." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.375, + 0.825, + 0.42 + ], + "angle": 0, + "content": "Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling. arXiv [cs.LG], 2025. URL http://arxiv.org/abs/2501.11651." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.427, + 0.828, + 0.497 + ], + "angle": 0, + "content": "Robert Irvine, Douglas Boubert, Vyas Raina, Adrian Liusie, Ziyi Zhu, Vineet Mudupalli, Aliaksei Korshuk, Zongyi Liu, Fritz Cremer, Valentin Assassi, Christie-Carol Beauchamp, Xiaoding Lu, Thomas Rialan, and William Beauchamp. Rewarding chatbots for real-world engagement with millions of users. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2303.06135." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.506, + 0.828, + 0.563 + ], + "angle": 0, + "content": "Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. 
VinePPO: Unlocking RL potential for LLM reasoning through refined credit assignment. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2410.01679." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.572, + 0.828, + 0.643 + ], + "angle": 0, + "content": "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal Behbahani, and Aleksandra Faust. Training language models to self-correct via reinforcement learning. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.12917." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.651, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Ariel N Lee, Cole J Hunter, and Nataniel Ruiz. Platypus: Quick, cheap, and powerful refinement of LLMs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.07317." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.688, + 0.828, + 0.732 + ], + "angle": 0, + "content": "Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper LLM thinking. arXiv [cs.AI], 2025. URL http://arxiv.org/abs/2501.09891." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.74, + 0.827, + 0.77 + ], + "angle": 0, + "content": "Noam Levi. A simple model of inference scaling laws. arXiv [stat.ML], 2024. URL http://arxiv.org/abs/2410.16377." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.778, + 0.828, + 0.822 + ], + "angle": 0, + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2305.20050." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.83, + 0.825, + 0.886 + ], + "angle": 0, + "content": "Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2309.15028." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Jieyi Long. Large language model guided tree-of-thought. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2305.08291." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.162 + ], + "angle": 0, + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2406.06592." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.169, + 0.827, + 0.215 + ], + "angle": 0, + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.19393." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.222, + 0.825, + 0.252 + ], + "angle": 0, + "content": "Aleksei Maslakov And Papadimas. Sudoku solver with step-by-step guidance, 2023. 
URL https://github.com/unmade/dokusan, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.261, + 0.827, + 0.305 + ], + "angle": 0, + "content": "Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2304.01904." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.313, + 0.827, + 0.357 + ], + "angle": 0, + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive IntroSpEction: Teaching language model agents how to self-improve. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.18219." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.366, + 0.827, + 0.397 + ], + "angle": 0, + "content": "David Radcliffe. 3 million crossword puzzles with ratings. Website, 2020. URL https://www.kaggle.com/datasets/radcliffe/3-million-sudoku-puzzles-with-ratings." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.405, + 0.825, + 0.449 + ], + "angle": 0, + "content": "William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2206.05802." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.457, + 0.825, + 0.502 + ], + "angle": 0, + "content": "Kulin Shah, Nishanth Dikkala, Xin Wang, and Rina Panigrahy. Causal language modeling can elicit search and reasoning capabilities on logic puzzles. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.10502." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.51, + 0.825, + 0.554 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, Y K Li, Y Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.03300." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.563, + 0.827, + 0.607 + ], + "angle": 0, + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. HybridFlow: A flexible and efficient RLHF framework. arXiv preprint arXiv: 2409.19256, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.616, + 0.827, + 0.658 + ], + "angle": 0, + "content": "Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. arXiv [cs.CL], 2021. URL http://arxiv.org/abs/2104.09864." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.668, + 0.828, + 0.752 + ], + "angle": 0, + "content": "Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9426-9439, Stroudsburg, PA, USA, 2024. Association for Computational Linguistics. URL http://dx.doi.org/10.18653/v1/2024.acl-long.510." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.762, + 0.828, + 0.82 + ], + "angle": 0, + "content": "Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.04592." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.828, + 0.825, + 0.872 + ], + "angle": 0, + "content": "Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2211.00053." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.881, + 0.825, + 0.926 + ], + "angle": 0, + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv [cs.AI], 2024. URL http://arxiv.org/abs/2408.00724." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.103, + 0.828, + 0.147 + ], + "angle": 0, + "content": "Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.00633." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.155, + 0.827, + 0.211 + ], + "angle": 0, + "content": "Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. DeepSeek-prover: Advancing theorem proving in LLMs through large-scale synthetic data. arXiv [cs.AI], 2024. URL http://arxiv.org/abs/2405.14333." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.22, + 0.827, + 0.32 + ], + "angle": 0, + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.328, + 0.825, + 0.371 + ], + "angle": 0, + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. arXiv [cs.CL], 2023a. URL http://arxiv.org/abs/2305.10601." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.379, + 0.827, + 0.449 + ], + "angle": 0, + "content": "Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, Ran Xu, Phil Mui, Huan Wang, Caiming Xiong, and Silvio Savarese. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv [cs.CL], 2023b. URL http://arxiv.org/abs/2308.02151." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.458, + 0.825, + 0.502 + ], + "angle": 0, + "content": "Seonghyeon Ye, Yongrae Jo, Doyoung Kim, Sungdong Kim, Hyeonbin Hwang, and Minjoon Seo. SelFee: Iterative self-revising LLM empowered by self-feedback generation. Blog post, 2023. URL https://kaistai.github.io/SelFee/." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.51, + 0.825, + 0.54 + ], + "angle": 0, + "content": "Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah D Goodman. 
STaR: Bootstrapping reasoning with reasoning. arXiv [cs.LG], 2022. URL http://arxiv.org/abs/2203.14465." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.548, + 0.825, + 0.591 + ], + "angle": 0, + "content": "Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B Tenenbaum, and Chuang Gan. Planning with large language models for code generation. arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2303.05510." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.599, + 0.827, + 0.643 + ], + "angle": 0, + "content": "Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2404.17140." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.651, + 0.827, + 0.694 + ], + "angle": 0, + "content": "Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning acting and planning in language models. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2310.04406." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.828, + 0.694 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image_caption", + "bbox": [ + 0.231, + 0.103, + 0.501, + 0.117 + ], + "angle": 0, + "content": "Math-500 Performance vs. Test Compute" + }, + { + "type": "image", + "bbox": [ + 0.214, + 0.118, + 0.484, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.538, + 0.103, + 0.782, + 0.116 + ], + "angle": 0, + "content": "GPQA Performance vs. 
Test Compute" + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.117, + 0.778, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.288, + 0.828, + 0.387 + ], + "angle": 0, + "content": "Figure 7: Evaluating backtracking on real LLMs. Left: On MATH-500, we compare the S1 model (fine-tuned on backtracking traces) using sequential decoding with budget forcing, against its base model (Qwen2.5-32B-Instruct) using parallel sampling. The backtracking model underperforms at low compute but narrows the gap at higher budgets. Right: On GPQA, the same backtracking setup outperforms parallel sampling in a multiple-choice reasoning setting. This comparison generalizes our conclusion from synthetic settings to real LLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.412, + 0.696, + 0.431 + ], + "angle": 0, + "content": "A Backtracking Analysis on Math Reasoning with LLMs" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.438, + 0.367, + 0.454 + ], + "angle": 0, + "content": "A.1 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.456, + 0.828, + 0.501 + ], + "angle": 0, + "content": "To complement our synthetic experiments, we conduct an evaluation on real-world math problems to examine whether backtracking remains effective under equal test-time compute. We compare two approaches:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.512, + 0.825, + 0.54 + ], + "angle": 0, + "content": "- Backtracking model: fine-tuned on solution traces that include explicit self-correction and step-by-step reflection." + }, + { + "type": "text", + "bbox": [ + 0.176, + 0.542, + 0.825, + 0.571 + ], + "angle": 0, + "content": "- Direct solution model: the base model without backtracking fine-tuning, using parallel sampling (with majority voting for final correct answer) at inference." 
+ }, + { + "type": "list", + "bbox": [ + 0.176, + 0.512, + 0.825, + 0.571 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.584, + 0.827, + 0.628 + ], + "angle": 0, + "content": "To control test-time compute, we use the budget forcing technique introduced in (Muen-nighoff et al., 2025). This enables a fair comparison across models with differing reasoning styles." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.635, + 0.826, + 0.693 + ], + "angle": 0, + "content": "Backtracking Model. We adopt the S1 checkpoint from Muennighoff et al. (2025), a model trained on solution traces distilled from DeepSeekR1. These traces exhibit explicit backtracking behaviors—identifying and correcting earlier mistakes. We generate outputs with temperature \\( T = 0.7 \\) under budget forcing and evaluate on the MATH-500." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.7, + 0.829, + 0.744 + ], + "angle": 0, + "content": "Direct Solution Model. For fair comparison, we use the same base model as S1—Qwen2.5-32B-Instruct—without backtracking fine-tuning. We sample \\(N = 1\\) to 8 completions with temperature \\(T = 0.7\\), and report both Best-of-\\(N\\) and Majority Vote accuracy." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.753, + 0.373, + 0.769 + ], + "angle": 0, + "content": "Results and Interpretation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.772, + 0.826, + 0.857 + ], + "angle": 0, + "content": "Figure 7 (left) presents accuracy under matched compute budgets. We observe that at low compute budgets the backtracking model underperforms due to its verbose reasoning traces. At higher budgets, backtracking matches and slightly exceeds the performance of parallel sampling. This mirrors trends observed in the CountDown (Section 4.3), and suggests that while backtracking introduces overhead, it yields benefits when sufficient compute is available." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.868, + 0.828, + 0.926 + ], + "angle": 0, + "content": "To form a sharp contrast, we reproduce results from (Muennighoff et al., 2025) on GPQA-Diamond (Figure 7, right), which shows that the same backtracking model significantly outperforms parallel sampling—even at lower budgets—in a multiple-choice setting. This contrast highlights that the effectiveness of backtracking is task-dependent." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.828, + 0.177 + ], + "angle": 0, + "content": "This real-world evaluation supports our synthetic findings: backtracking improves performance under compute constraints, but its advantage depends on the task structure. On open-ended math problems, the benefit is most pronounced at higher budgets. On structured tasks like multiple-choice QA, gains can appear even earlier. Overall, our conclusions generalize beyond synthetic settings." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.187, + 0.421, + 0.204 + ], + "angle": 0, + "content": "B Related Work Extended" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.212, + 0.442, + 0.228 + ], + "angle": 0, + "content": "B.1 Test-time computation scaling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.23, + 0.827, + 0.329 + ], + "angle": 0, + "content": "A growing body of work has explored how to improve language model performance by scaling test-time computation. These approaches typically fall into two broad categories: parallel and sequential search. 
Parallel methods sample multiple solutions independently and select the best one using predefined criteria—such as majority voting or external reward models—as seen in Best-of-\\(N\\) techniques (Brown et al., 2024; Irvine et al., 2023; Levi, 2024). These methods often rely on outcome-based reward models that score complete solutions (Xin et al., 2024; Ankner et al., 2024)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.828, + 0.455 + ], + "angle": 0, + "content": "In contrast, sequential methods iteratively refine reasoning by conditioning on previous attempts. This class includes stepwise improvement methods(Ankner et al., 2024; Hou et al., 2025; Lee et al., 2025), where each new trajectory builds on earlier outputs, enabling the model to adapt its reasoning dynamically. Other research works have also explored using the search process itself to improve model reasoning capabilities, either during inference or by integrating the feedback into training(Wang et al., 2024; Luo et al., 2024). While these methods can reduce redundancy, they typically require more compute per sample and may suffer from compounding errors." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.828, + 0.577 + ], + "angle": 0, + "content": "Tree-based approaches, such as Monte Carlo Tree Search (MCTS) and guided beam search, represent a hybrid between parallel and sequential strategies(Gandhi et al., 2024; Liu et al., 2023; Zhang et al., 2023; Zhou et al., 2023; Choi et al., 2023; Xie et al., 2023). These methods often leverage process reward models, which assign value to intermediate reasoning steps rather than full outputs(Lightman et al., 2023; Wang et al., 2024; Wu et al., 2024). REBASE(Wu et al., 2024), for example, uses a process reward model to guide exploration and pruning in tree search, and has been shown to outperform both sampling-based methods and traditional MCTS." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.585, + 0.46, + 0.601 + ], + "angle": 0, + "content": "B.2 Self-correction and backtracking" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.603, + 0.829, + 0.785 + ], + "angle": 0, + "content": "Search and backtracking are inherently tied to self-correction, as they enable models to revisit earlier decisions and recover from errors—a critical capability for multi-step reasoning. Teaching language models to self-correct has been approached through fine-tuning on revision demonstrations from humans or stronger models(Saunders et al., 2022; Ye et al., 2023; Qu et al., 2024), as well as through synthetic data generation and handcrafted augmentation(Paul et al., 2023; Wang et al., 2023; Lee et al., 2023). Reward-based methods provide another avenue, using outcome- or process-level signals to differentiate good and bad reasoning trajectories, often framed as implicit policy learning(Welleck et al., 2022; Akyurek et al., 2023; Zhang et al., 2024). Some methods further incorporate search, critique generation, or separate correction modules to enhance reasoning quality(Yao et al., 2023b; Havrilla et al., 2024). In contrast, using two structured games, we investigate the tradeoffs of teaching models to backtrack via search traces versus allowing them to learn purely from correct solutions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.794, + 0.538, + 0.81 + ], + "angle": 0, + "content": "B.3 Reinforcement learning for LLM reasoning" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.813, + 0.828, + 0.927 + ], + "angle": 0, + "content": "Reinforcement learning (RL) has emerged as a powerful framework for improving the reasoning abilities of language models. 
While early work applied off-policy and on-policy RL methods to guide models toward verifiable outcomes(Zelikman et al., 2022; Kazemnejad et al., 2024), recent approaches have shown that even simplified algorithms like GRPO can lead to significant performance gains and the emergence of in-context search behavior (DeepSeek-AI et al., 2025; Shao et al., 2024; DeepSeek-AI et al., 2025). These advances suggest that RL can help models autonomously discover more effective reasoning strategies, even without explicit reward models or structured search. However, not all models benefit" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.949, + 0.509, + 0.961 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.828, + 0.162 + ], + "angle": 0, + "content": "equally from RL, and it remains unclear what properties make a model amenable to learning through reinforcement. Our work contributes to this question by examining how backtracking models, when trained with GRPO, can discover novel solution strategies—while no-backtracking models show limited or mixed gains." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.176, + 0.381, + 0.194 + ], + "angle": 0, + "content": "C Experiment details" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.202, + 0.544, + 0.218 + ], + "angle": 0, + "content": "C.1 Additional details on game, data generation" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.221, + 0.827, + 0.295 + ], + "angle": 0, + "content": "CountDown tree size computation. CountDown has an exponentially growing search space with respect to the number of candidate numbers. 
If the current state has \\( N \\) available numbers, there are \\( \\binom{N}{2} \\times 4 \\) possible actions (selecting a pair and one of four operations), and the depth of the tree is \\( N - 1 \\). For games with four candidate numbers, the complete search tree contains 1,152 nodes." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.305, + 0.825, + 0.419 + ], + "angle": 0, + "content": "CountDown search. To generate DFS search data, we use a sum heuristic to guide the search order and prune nodes. This heuristic measures the distance between the sum of all input numbers and the target number, and prunes a node if the absolute distance exceeds the target. This approach is inspired by Gandhi et al. (2024), who also consider an alternative—the multiply heuristic—which measures the minimum distance between the input set and the factors of the target. However, in our experiments, both heuristics yield similar performance: for a fixed number of visited nodes, DFS with either heuristic solves approximately the same number of games." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.427, + 0.825, + 0.498 + ], + "angle": 0, + "content": "Sudoku rule. In a Sudoku game, the player is given a \\( 9 \\times 9 \\) grid in which each cell must be filled with a digit from 1 to 9. The puzzle is subject to three constraints: each row, each column, and each of the nine \\( 3 \\times 3 \\) subgrids must contain all digits from 1 to 9 exactly once. Given a partially filled grid, the objective is to fill in the remaining cells such that all constraints are satisfied." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.508, + 0.827, + 0.593 + ], + "angle": 0, + "content": "Sudoku data and tokenization. To represent theiku board for language models, we encode each cell as a position-value pair: \\((x,y) = v\\), where \\((x,y)\\) denotes the grid location and \\(v\\) is the cell's value. 
The model receives the initial board as a list of known \\((x,y) = v\\) pairs and generates the solution by predicting the values for the remaining cells. We generate backtracking traces by serializing the full DFS traversal. For the direct solution model, we prune each trace to include only the final solution path." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.602, + 0.825, + 0.673 + ], + "angle": 0, + "content": "Scoring. For CountDown, a solution is correct only if it adheres to game rules and correctly achieves the target number. For Selenium, correctness requires fully solving the board, with no partial credit given for incomplete but correct boards. Models are tested on 200 unseen problems per game. The same scoring function is used as the reward function in GRPO (Section 6)" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.684, + 0.52, + 0.699 + ], + "angle": 0, + "content": "C.2 Additional details on model architecture" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.704, + 0.526, + 0.72 + ], + "angle": 0, + "content": "Model hyperparameters can be found in Table 1." + }, + { + "type": "table", + "bbox": [ + 0.188, + 0.735, + 0.812, + 0.822 + ], + "angle": 0, + "content": "
Model SizeHidden SizeLayersAttn HeadsIntermediate SizeKV Heads
3M256645121
17M5128410241
38M51210820482
144M102412830722
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.831, + 0.825, + 0.861 + ], + "angle": 0, + "content": "Table 1: Qwen2.5-style architecture configurations for the four model sizes used in our experiments." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.876, + 0.407, + 0.892 + ], + "angle": 0, + "content": "C.3 Training hyperparameter" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.895, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Training hyperparameters can be found in Table 2. We train all models on 2 NVIDIA H100 80GB HBM3 GPUs." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.032, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "table", + "bbox": [ + 0.317, + 0.102, + 0.681, + 0.342 + ], + "angle": 0, + "content": "
HyperparameterValue
Optimization
Learning rate1 × 10-5
Weight decay0.01
Learning Rate Schedule
Scheduler typeCosine
Warmup steps1
Training Setup
Epochs30
Batch size (backtracking model)32
Batch size (direct solution model)64
Context length (backtracking model)4096
Context length (direct solution model)512
Tokenizer
Tokenizer size (CountDown)74
Tokenizer size (Sudoku)110
" + }, + { + "type": "table_caption", + "bbox": [ + 0.17, + 0.351, + 0.825, + 0.381 + ], + "angle": 0, + "content": "Table 2: Training hyperparameters used for all experiments. Batch size and context length vary based on model type." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.403, + 0.383, + 0.421 + ], + "angle": 0, + "content": "D FLOP computation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.427, + 0.827, + 0.483 + ], + "angle": 0, + "content": "To compare backtracking and direct solution models under a fixed compute budget, we estimate inference FLOPs based on model architecture and generation length \\( T \\). We use a simplified transformer FLOP computation that accounts for per-token operations across all layers." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.494, + 0.592, + 0.511 + ], + "angle": 0, + "content": "Below is a list of architectural and generation parameters:" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.521, + 0.384, + 0.536 + ], + "angle": 0, + "content": "- \\(d_{\\mathrm{model}}\\): hidden dimension" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.538, + 0.397, + 0.554 + ], + "angle": 0, + "content": "\\(d_{\\mathrm{kv}}\\) : key/value dimension" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.555, + 0.501, + 0.571 + ], + "angle": 0, + "content": "- \\(d_{\\mathrm{ff}}\\): intermediate (feedforward) dimension" + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.572, + 0.341, + 0.587 + ], + "angle": 0, + "content": "- \\( L \\) : number of layers" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.588, + 0.568, + 0.604 + ], + "angle": 0, + "content": "- \\(T\\): number of generated tokens (i.e., context length)" + }, + { + "type": "text", + "bbox": [ + 0.177, + 0.605, + 0.655, + 0.62 + ], + "angle": 0, + "content": "- \\(N\\): number of sequences generated (e.g., in best-of-\\(N\\) sampling)" + }, + { + "type": "list", + "bbox": [ + 0.177, + 0.521, + 0.655, + 0.62 + ], + "angle": 0, + "content": null + }, 
+ { + "type": "title", + "bbox": [ + 0.172, + 0.628, + 0.458, + 0.644 + ], + "angle": 0, + "content": "D.1 Step-by-step FLOPs Calculation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.647, + 0.825, + 0.677 + ], + "angle": 0, + "content": "1. Per-layer linear FLOPs per token. We break down the linear FLOPs for each transformer layer into attention and MLP components:" + }, + { + "type": "title", + "bbox": [ + 0.177, + 0.688, + 0.305, + 0.701 + ], + "angle": 0, + "content": "Self-attention:" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.702, + 0.469, + 0.718 + ], + "angle": 0, + "content": "- Query projection: \\(d_{\\mathrm{model}} \\times d_{\\mathrm{model}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.718, + 0.432, + 0.732 + ], + "angle": 0, + "content": "- Key projection: \\(d_{\\mathrm{model}} \\times d_{\\mathrm{kV}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.732, + 0.443, + 0.746 + ], + "angle": 0, + "content": "- Value projection: \\(d_{\\mathrm{model}} \\times d_{\\mathrm{kv}}\\)" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.746, + 0.475, + 0.759 + ], + "angle": 0, + "content": "- Output projection: \\(d_{\\mathrm{model}} \\times d_{\\mathrm{model}}\\)" + }, + { + "type": "list", + "bbox": [ + 0.21, + 0.702, + 0.475, + 0.759 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.759, + 0.371, + 0.771 + ], + "angle": 0, + "content": "This results in a total of:" + }, + { + "type": "equation", + "bbox": [ + 0.354, + 0.775, + 0.663, + 0.794 + ], + "angle": 0, + "content": "\\[\n\\mathrm {F L O P s} _ {\\text {a t t e n t i o n - l i n e a r}} = 2 d _ {\\text {m o d e l}} ^ {2} + 2 d _ {\\text {m o d e l}} d _ {\\mathrm {k v}}\n\\]" + }, + { + "type": "title", + "bbox": [ + 0.177, + 0.796, + 0.349, + 0.809 + ], + "angle": 0, + "content": "MLP (Feedforward):" + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.811, + 0.508, + 0.825 + ], + "angle": 0, + "content": "MLP layers include 
following components:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.825, + 0.328, + 0.839 + ], + "angle": 0, + "content": "- Gate projection" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.841, + 0.316, + 0.856 + ], + "angle": 0, + "content": "- Up projection" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.857, + 0.338, + 0.871 + ], + "angle": 0, + "content": "- Down projection" + }, + { + "type": "list", + "bbox": [ + 0.198, + 0.825, + 0.338, + 0.871 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.194, + 0.871, + 0.568, + 0.886 + ], + "angle": 0, + "content": "Each of these MLP layers costs: \\( d_{\\mathrm{model}} \\times d_{\\mathrm{ff}} \\), giving:" + }, + { + "type": "equation", + "bbox": [ + 0.425, + 0.89, + 0.594, + 0.907 + ], + "angle": 0, + "content": "\\[\n\\mathrm {F L O P s} _ {\\mathrm {m l p}} = 3 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {f f}}\n\\]" + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.909, + 0.814, + 0.926 + ], + "angle": 0, + "content": "\\( {}^{2} \\) key/value dimension is different from hidden dimension because of GQA (Ainslie et al., 2023)" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.695, + 0.12 + ], + "angle": 0, + "content": "Combining both components, the total per-token linear cost per layer is:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.128, + 0.673, + 0.148 + ], + "angle": 0, + "content": "\\[\n\\mathrm {F L O P s} _ {\\mathrm {l i n e a r}} = 2 d _ {\\mathrm {m o d e l}} ^ {2} + 2 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {k v}} + 3 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {f f}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.162, + 0.825, + 0.192 + 
], + "angle": 0, + "content": "2. Quadratic attention cost. Self-attention involves computing interactions between all token pairs, resulting in a quadratic cost:" + }, + { + "type": "equation", + "bbox": [ + 0.372, + 0.199, + 0.624, + 0.231 + ], + "angle": 0, + "content": "\\[\n\\mathrm {F L O P s} _ {\\text {a t t e n t i o n}} = d _ {\\text {m o d e l}} \\cdot \\frac {T (T + 1)}{2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.245, + 0.825, + 0.275 + ], + "angle": 0, + "content": "3. Total generation cost per sequence. Each token attends to all previous tokens across all \\( L \\) layers. The generation cost for a single sequence is:" + }, + { + "type": "equation", + "bbox": [ + 0.32, + 0.282, + 0.677, + 0.3 + ], + "angle": 0, + "content": "\\[\n\\mathrm {F L O P s} _ {\\mathrm {g e n}} = L \\cdot \\left(\\mathrm {F L O P s} _ {\\mathrm {l i n e a r}} \\cdot T + \\mathrm {F L O P s} _ {\\mathrm {a t t e n t i o n}}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.314, + 0.825, + 0.341 + ], + "angle": 0, + "content": "4. Total inference FLOPs. For \\(N\\) sequences (e.g., best-of- \\(N\\) sampling), the total inference cost is:" + }, + { + "type": "equation", + "bbox": [ + 0.399, + 0.342, + 0.599, + 0.36 + ], + "angle": 0, + "content": "\\[\n\\mathrm {F L O P s} _ {\\text {t o t a l}} = N \\cdot \\mathrm {F L O P s} _ {\\text {g e n}}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.374, + 0.827, + 0.418 + ], + "angle": 0, + "content": "We do not include auxiliary operations such as token embedding and softmax, weight norm, as their contribution is negligible compared to the transformer layers. All FLOPs reported in our experiments use this formula, with model configurations listed in Table 1." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.43, + 0.498, + 0.448 + ], + "angle": 0, + "content": "E Majority voting versus best-of-n" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.827, + 0.527 + ], + "angle": 0, + "content": "In this work, we primarily use the best-of-\\(n\\) metric to evaluate the direct solution model. This metric is suitable for tasks where verifying the correctness of a solution is trivial, whereas solving the task itself is challenging. Many real-world problems, such as coding tasks and combinatorial optimization, fall into this category. Conversely, for problems where verification is difficult, metrics such as majority voting may be more appropriate." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.536, + 0.827, + 0.593 + ], + "angle": 0, + "content": "To illustrate this point, we additionally evaluate the CountDown direct solution model using both metrics in Figure 8. For majority voting, we generate n solutions per test problem, select the most frequently occurring solution (breaking ties randomly), and evaluate its correctness." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.604, + 0.827, + 0.746 + ], + "angle": 0, + "content": "We find that the majority-voting performance closely approximates the direct solution model's one-shot accuracy (i.e., best-of- \\( n \\) with \\( n = 1 \\)). However, majority voting is less suitable for our task for several reasons. First, the CountDown game frequently has multiple correct solutions, so selecting the majority solution path can fail to detect cases where the model generates different but equally valid solutions. Second, while majority voting is appropriate in real-world LLM scenarios—such as mathematical reasoning—where distinct solution paths converge to the same final boxed answer, in our synthetic setting, where models are trained from scratch, majority voting essentially becomes a noisy proxy for greedy decoding (sampling at temperature \\( T = 0 \\)). 
Thus, we expect and observe majority voting accuracy to closely track pass@1 accuracy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.755, + 0.827, + 0.786 + ], + "angle": 0, + "content": "In summary, given the characteristics of our task and the controlled experimental setup, best-of-\\(n\\) remains a valid and preferred metric for evaluating direct solution models." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.797, + 0.562, + 0.816 + ], + "angle": 0, + "content": "F Dependence on depth of the search tree" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.822, + 0.345, + 0.838 + ], + "angle": 0, + "content": "F.1 Search tree depth" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.84, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Why do backtracking models perform well on Selenium but underperform on CountDown, even when both are trained on DFS search traces? We argue that task characteristics—particularly those beyond our control in real—world settings—play a key role in determining whether backtracking is test-time-compute-efficient. A major difference between the two games lies in the depth of their search trees (Figure 2). In hard Selenium puzzles, only 20 out of 81 cells are pre-filled, leaving 50–60 cells to solve. This results in deep search trees with" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.108, + 0.655, + 0.252 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.27, + 0.828, + 0.342 + ], + "angle": 0, + "content": "Figure 8: Majority voting versus best-of- \\( n \\) for CountDown direct solution model. For CountDown, verification is much easier than solving the problem. 
Therefore, best-of- \\( n \\) as a performance is justified. Additionally, we also examine majority voting performance. However, CountDown solutions are not unique, majority voting is not the most suitable way to measure model performances." + }, + { + "type": "image", + "bbox": [ + 0.228, + 0.357, + 0.49, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.357, + 0.771, + 0.543 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.555, + 0.828, + 0.709 + ], + "angle": 0, + "content": "Figure 9: The efficacy of backtracking depends on search tree depth. Left: We introduce a variation of the CountDown game—stacked CountDown—to increase the search tree depth. In the original CountDown game (solid bars), the direct solution model consistently outperforms the backtracking model, shown by a positive performance gap. In the stacked version (slanted bars), this gap is significantly reduced or even reversed, indicating that backtracking becomes more compute-efficient at greater depths. Right: We introduce a variation of Sudoku—easy Sudoku—where the initial board has more pre-filled cells to decrease the search tree depth. In the original setting, the 38M direct solution model (bottom, solid bars) underperforms the backtracking model. In the shallow Sudoku variant (slanted bars), the performance gap narrows across compute budgets. For the 17M models (top), the results are less conclusive." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.738, + 0.825, + 0.81 + ], + "angle": 0, + "content": "extensive trial-and-error, with many backtracking steps. In contrast, CountDown (in our setup) uses 4 candidate numbers, limiting the search tree depth to just 3. We hypothesize that backtracking models excels at tasks with deeper search trees, while shallow trees make parallel strategies (i.e., direct solution model) more effective. 
To test this, we design a variant of CountDown with increased search depth and a variant of Sudoku with reduced depth." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.821, + 0.377, + 0.836 + ], + "angle": 0, + "content": "F.2 A deeper CountDown" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.827, + 0.926 + ], + "angle": 0, + "content": "Set up To increase the search tree depth in CountDown, one might naively scale up the number of candidate numbers. However, this approach quickly leads to exponential growth in tree width: with 4 candidates, the tree contains 1,152 nodes; with 5 candidates, it grows to 46,080. To prevent the exponential growth in the number of search paths, we design a stacked CountDown variant that increases depth while controlling tree width. In this setup, the player is given 8 candidate numbers and a final target. The first 4 numbers must be used" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.173, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.104, + 0.827, + 0.19 + ], + "angle": 0, + "content": "to reach the 5th number (\"a partial goal\"), and the remaining 4 numbers must then be used to reach the final target. This effectively stacks two CountDown problems, increasing depth without combinatorial explosion. We generate training data for both backtracking and no-backtracking models following the same procedure as in Section 3.1.2, with examples provided in Appendix H (Figure 16). We train a 17M as well as a 38M model until validation loss has converged, and test on 200 unseen problems." 
Set up To reduce the search tree depth in Sudoku, we generate easier boards by increasing the number of initially filled cells. Specifically, we take the original 3M Sudoku dataset Radcliffe (2020) and apply the direct solution model (Section 3.2.2) to correctly fill 10 additional cells. This increases the average number of pre-filled cells from 20 to around 30, effectively decreasing search tree depth. We generate both backtracking and direct solution training data following the same procedure in Section 3.2.2. Models with 17M and 38M parameters are trained to convergence and evaluated on 200 unseen problems.
In the original (hard) Sudoku setting, the 38M direct solution model consistently underperforms the backtracking model, as indicated by the negative gaps (solid green bars). In the shallow-Sudoku variant (slanted bars), these gaps are reduced across all test-time compute budgets for the 38M model. The trend is less clear for the 17M model, where the performance difference remains small in both settings. Overall, these results support our hypothesis: in Sudoku, backtracking becomes more test-time-compute-efficient when the search tree is deeper." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.603, + 0.377, + 0.619 + ], + "angle": 0, + "content": "G Additional results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.63, + 0.533, + 0.646 + ], + "angle": 0, + "content": "G.1 Exploring different CountDown strategies" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.651, + 0.827, + 0.751 + ], + "angle": 0, + "content": "We analyze different search strategies for CountDown, including DFS and BFS with varying beam widths. For each strategy, we tokenize the resulting backtracking trace and measure number of tokens used in each search trace. The goal is to identify which strategy that finds correct solutions with the fewest tokens (Figure 10). The results show no clear winner. BFS with a smaller beam width produces shorter traces by exploring fewer nodes, but this comes at the cost of missing correct solutions more frequently. Increasing the beam width improves solution coverage but leads to longer traces due to broader exploration." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.763, + 0.826, + 0.821 + ], + "angle": 0, + "content": "In contrast, DFS produces more uniformly distributed trace lengths but suffers from a specific failure mode: it may prune the correct path early and terminate prematurely. These failures appear as short but incorrect traces, visible as the left-most orange bars in Figure 10 (bottom)." 
Figure 10: Examine different search strategies for CountDown. Beyond DFS, we experiment with Breadth-First-Search (BFS) with different beam widths.
T-B CorrectT-B Incorrect
B Correct8319
B Incorrect4157
" + }, + { + "type": "table_caption", + "bbox": [ + 0.192, + 0.552, + 0.802, + 0.568 + ], + "angle": 0, + "content": "Table 3: Confusion matrix between Think-Backtrack (T-B) and Backtrack (B) models." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.595, + 0.48, + 0.611 + ], + "angle": 0, + "content": "G.3 Supervised learning training curve" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.613, + 0.825, + 0.669 + ], + "angle": 0, + "content": "During training, we set the maximum epochs to 30 epochs and allow early stopping. All models converge before 30 epochs and we early stop training when the validation loss has converged on log-log scale. Figure 11, 12 show the training curve for both models and for CountDown and Selenium." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.679, + 0.398, + 0.695 + ], + "angle": 0, + "content": "G.4 Additional GRPO plots" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.697, + 0.827, + 0.81 + ], + "angle": 0, + "content": "In Figure 3 (Section 4.2.1), we used the number of mistakes as a proxy for comparing search strategies. To further demonstrate that the backtracking model fine-tuned with GRPO discovers new strategies, we repeat the same analysis in Figure 13 (right). Compared to the original backtracking model (Figure 13, left), the post-GRPO model solves many problems with a different number of mistakes than the number of mistakes made by DFS. This shift indicates that the model is no longer tightly aligned with the original search trace and has discovered alternative, more diverse solution paths. Figure 6 (left) quantifies the above qualitative observation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.822, + 0.327, + 0.841 + ], + "angle": 0, + "content": "H Data sample" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.846, + 0.826, + 0.89 + ], + "angle": 0, + "content": "Figure 14 shows an example of a CountDown game and the training data. 
Figure 12: Sudoku validation loss. Left: Backtracking model. Right: Direct solution model.
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.136, + 0.439, + 0.153 + ], + "angle": 0, + "content": "Example of a CountDown game" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.163, + 0.56, + 0.179 + ], + "angle": 0, + "content": "Target: 16 Candidates: [96, 11, 78, 22]" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.197, + 0.332, + 0.21 + ], + "angle": 0, + "content": "DFS Search Trace" + }, + { + "type": "code", + "bbox": [ + 0.226, + 0.221, + 0.7, + 0.661 + ], + "angle": 0, + "content": "Current State: 16:[96, 11, 78, 22], Operations: [ ] \nExploring Operation: 96-78=18, Resulting Numbers: [11, 22, 18] \nGenerated Node #0,0: 16:[11, 22, 18] Operation: 96-78=18 \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 22-11=11, Resulting Numbers: [18, 11] \nGenerated Node #0,0,0: 16:[18, 11] Operation: 22-11=11 \nMoving to Node #0,0,0 \nCurrent State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] \nExploring Operation: 18-11=7, Resulting Numbers: [7] \n7,16 unequal: No Solution \nMoving to Node #0,0,0 \nCurrent State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] \nExploring Operation: 18+11=29, Resulting Numbers: [29] \n29,16 unequal: No Solution \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 18-11=7, Resulting Numbers: [22, 7] \nGenerated Node #0,0,1: 16:[22, 7] Operation: 18-11=7 \nMoving to Node #0,0,1 \nCurrent State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] \nExploring Operation: 22-7=15, Resulting Numbers: [15] \n15,16 unequal: No Solution \nMoving to Node #0,0,1 \nCurrent State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] \nExploring Operation: 
Figure 14: Example of training data for CountDown. Top: game setup. Middle: full DFS search trace for training backtracking model. Bottom: correct solution path for training direct solution model.
7245
17
839
51
297
32
84
4789
6213
" + }, + { + "type": "table_caption", + "bbox": [ + 0.595, + 0.183, + 0.664, + 0.192 + ], + "angle": 0, + "content": "Solved Board" + }, + { + "type": "table", + "bbox": [ + 0.504, + 0.193, + 0.756, + 0.373 + ], + "angle": 0, + "content": "
712498635
964135278
583726194
876954312
239861457
451372986
328549761
147683529
695217843
" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.389, + 0.404, + 0.404 + ], + "angle": 0, + "content": "Example of a Sudoku game" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.415, + 0.801, + 0.468 + ], + "angle": 0, + "content": "START \\((0, 0) = 7\\) \\((0, 2) = 2\\) \\((0, 3) = 4\\) \\((0, 8) = 5\\) \\((1, 3) = 1\\) \\((1, 7) = 7\\) \\((2, 1) = 8\\) \\((2, 2) = 3\\) \\((2, 7) = 9\\) \\((3, 4) = 5\\) \\((3, 7) = 1\\) \\((4, 0) = 2\\) \\((4, 2) = 9\\) \\((4, 8) = 7\\) \\((5, 3) = 3\\) \\((5, 5) = 2\\) \\((6, 2) = 8\\) \\((6, 4) = 4\\) \\((7, 1) = 4\\) \\((7, 2) = 7\\) \\((7, 4) = 8\\) \\((7, 8) = 9\\) \\((8, 0) = 6\\) \\((8, 3) = 2\\) \\((8, 4) = 1\\) \\((8, 8) = 3\\) solving" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.485, + 0.331, + 0.498 + ], + "angle": 0, + "content": "DFS Search Trace" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.508, + 0.802, + 0.682 + ], + "angle": 0, + "content": "SOL_START \\((4, 4) = 6\\) \\((8, 2) = 5\\) \\((4, 3) = 8\\) \\((8, 1) = 9\\) \\((8, 5) = 7\\) GUESS: \\((0, 1)\\) \\([1, 6] = 1\\) \\((0, 1) = 1\\) GUESS: \\((0, 4)\\) \\([3, 9] = 3\\) \\((0, 4) = 3\\) GUESS: \\((0, 6)\\) \\([6, 8] = 6\\) \\((0, 6) = 6\\) \\((0, 7) = 8\\) \\((8, 7) = 4\\) \\((0, 5) = 9\\) \\((1, 4) = 2\\) \\((1, 8) = 4\\) \\((2, 4) = 7\\) \\((3, 5) = 1\\) \\((4, 5) = 9\\) \\((8, 6) = 8\\) \\((1, 2) = 6\\) \\((1, 6) = 3\\) \\((3, 3) = 7\\) \\((1, 1) = 5\\) \\((1, 5) = 8\\) \\((2, 0) = 4\\) \\((4, 1) = 3\\) \\((4, 7) = 5\\) \\((5, 6) = 4\\) \\((5, 7) = 6\\) \\((5, 8) = 8\\) \\((6, 1) = 2\\) \\((7, 7) = 2\\) \\((1, 0) = 9\\) \\((3, 0) = 8\\) \\((3, 1) = 6\\) \\((3, 8) = 2\\) \\((5, 1) = 7\\) \\((5, 2) = 1\\) \\((2, 8) = 1\\) \\((3, 6) = 9\\) \\((5, 0) = 5\\) \\((6, 8) = 6\\) \\((2, 6) = 2\\) NO_CANDIDATE: \\((3, 2)\\) NO CANDIDATE: \\((0, 6)\\) REVERT: \\((0, 6)\\) \\([6, 8] =\\) NONE GUESS: \\((0, 6)\\) \\([6, 8] = 8\\) \\((0, 6) = 8\\) \\((0, 7) = 6\\) \\((8, 6) = 4\\) \\((8, 7) = 8\\) \\((0, 5) = 9\\) \\((1, 4) = 2\\) \\((1, 6) = 
3\\) \\((1, 8) = 4\\) \\((2, 4) = 7\\) \\((3, 5) =\\) \\(4\\) \\((4, 5) = 1\\) \\((4, 6) = 5\\) \\((5, 4) = 9\\) \\((5, 6) = 6\\) \\((5, 7) = 4\\) \\((5, 8) = 8\\) \\((1, 2) = 6\\) \\((3, 3) = 7\\) \\((3, 8) = 2\\) \\((4,\\) \\(1) = 3\\) \\((5,2) = 1\\) \\((6,1) = 2\\) \\((6,7) = -2\\) \\((1,1) = -5\\) \\((1,5) = -8\\) \\((2,0) = -4\\) \\((2,8) = -1\\) \\((3,0)=\\( \\(8(3,1) = -6(3,6) = -7(7,0)=3(6,0)=1\\text{NO\\_CANDIDATE}:(3,2)\\text{NO\\_CANDIDATE}:(0,6)\\text{revert}:(0,6)[6,(8)]=\\)\\(\\text{NO\\_CANDIDATE NO\\_CANDIDATE}:(0,4)\\text{REVERT}:(0,4)[3,9]=\\text{NONE GUESS}:(0,4)[3,9]=9(0,4)=9(5,4)=7(2,4)=2(3,3)=9(3,5)=4(4,5)=1(1,4)=3(3,2)=6(5,1)=5(1,1)=6(1,2)=4(2,\\)\\)\\(\\begin{array}{r}\\emptyset=5(2,5)=6(4,1)=3(5,2)=1(6,1)=2(0,5)=8(1,\\emptyset)=9(1,5)=5(2,3)=7(3,\\emptyset)=8\\\\(3,\\text{1})=7(3,\\text{8})=2(5,\\emptyset)=4(7,\\text{5})=3(1,\\text{8})=8(3,\\text{6})=3(5,\\text{8})=6(6,\\text{5})=9(6,\\text{8})=1(7,\\emptyset)=\\\\1(0,\\emptyset)=6(0,\\emptyset)=3(1,\\emptyset)=2(2,\\emptyset)=4(5,\\emptyset)=8(6,\\emptyset)=3(7,\\emptyset)=5(8,\\emptyset)=4(2,\\emptyset)=1(4,\\emptyset)\\end{array}\\) \\(= (3,1) = -7(3,\\text{8}) = -6(4,\\text{7}) = -3(1,\\text{6}) = -2(2,\\text{8}) = -4(5,\\text{7}) = -8(6,\\text{0}) = -3(7,\\text{7}) = -2(8,\\text{6}) = -8(6,\\text{3}) = -5\\mathrm{SOL}\\quad\\mathrm{END}\\)" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.701, + 0.323, + 0.714 + ], + "angle": 0, + "content": "Correct Solution" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.723, + 0.802, + 0.787 + ], + "angle": 0, + "content": "\\(\\begin{array}{l}\\mathrm{SOL\\_START(4,4) = 6(8,2) = 5(4,3) = 8(8,1) = 9(8,5) = 7(0,1) = 1(0,4) = 9(5,4) = 7(2,4) =}\\\\ \\mathrm{2(3,3) = 9(3,5) = 4(4,5) = 1(1,4) = 3(3,2) = 6(5,1) = 5(1,1) = 6(1,2) = 4(2,0) = 5(2,5)}\\\\ \\mathrm{= 6(4,1) = 3(5,2) = 1(6,1) = 2(0,5) = 8(1,0) = 9(1,5) = 5(2,3) = 7(3,0) = 8(3,1) = 7(3,}\\\\ \\mathrm{8) = 2(5,0) = 4(7,5) = 3(1,8) = 8(3,6) = 3(5,8) = 6(6,5) = 9(6,8) = 1(7,0) = 1(0,6) = 
6}\\\\ (\\emptyset ,7) = 3(1,6) = 2(2,8) = 4(5,7) = 8(6,\\emptyset) = 3(7,6) = 5(8,7) = 4(2,6) = 1(4,6) = 4(4,7) = \\\\ \\mathrm{5(5,6)} = 9(6,6) = 7(6,7) = 6(7,3) = 6(7,7) = 2(8,6) = 8(6,3) = 5\\mathrm{SOL\\_END} \\end{array}\\)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.814, + 0.828, + 0.843 + ], + "angle": 0, + "content": "Figure 15: Example of training data for Sudoku game. Top: initial puzzle setup. Middle: full search trace with guesses and backtracking (tabs used). Bottom: final correct solution." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.174, + 0.033, + 0.518, + 0.049 + ], + "angle": 0, + "content": "Published as a conference paper at COLM 2025" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.105, + 0.5, + 0.121 + ], + "angle": 0, + "content": "Example of a stacked-CountDown game" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.131, + 0.689, + 0.147 + ], + "angle": 0, + "content": "Target: 96 Candidates: [22, 77, 24, 48, 31, 12, 36, 35]" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.165, + 0.331, + 0.179 + ], + "angle": 0, + "content": "DFS Search Trace" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.187, + 0.825, + 0.593 + ], + "angle": 0, + "content": "Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: \\(77 - 48 = 29\\) ,Resulting Numbers: [22, 24, 29, 31, 12, 36, 35] Generated Node #0,0:96:[22, 24, 29, 31, 12, 36, 35]Operation: \\(77 - 48 = 29\\) Moving to Node \\(\\# 0,0\\) Current State:96:[22, 24, 29, 31, 12, 36, 35],Operations:['77-48=29'] Exploring Operation: \\(22 + 24 = 46\\) ,Resulting Numbers: [29, 46, 31, 12, 36, 35] Generated Node \\(\\# 0,0,0\\) :96:[29,46,31,12,36,35]Operation: \\(22 + 24 = 46\\) Moving to Node \\(\\# 0,0,0\\) Current State:96:[29,46,31,12,36,35],Operations:'77-48=29', '22+24=46' ] Exploring Operation: \\(46 - 29 = 17\\) 
,Resulting Numbers: [17,31,12,36,35] 17,31 unequal Moving to Node \\(\\# 0,0\\) ... . . . Current State:96:[29,2,31,12,36,35],Operations:'77-48=29', '24-22=2' ] Exploring Operation: \\(29 + 2 = 31\\) ,Resulting Numbers: [31,31,12,36,35] 31,31 equal Current State:96:[31,12,36,35],Operations:[] Exploring Operation: \\(36 - 35 = 1\\) ,Resulting Numbers: [31,12,1] Generated Node \\(\\# 0,0\\) :96:[31,12,1]Operation: \\(36 - 35 = 1\\) Moving to Node \\(\\# 0,0\\) Current State:96:[31,12,1],Operations:'36-35=1' Exploring Operation: \\(31 + 1 = 32\\) ,Resulting Numbers: [12,32] Generated Node \\(\\# 0,0,0\\) :96:[12,32]Operation: \\(31 + 1 = 32\\) Moving to Node \\(\\# 0,0,0\\) Current State:96:[12,32],Operations:' \\(36 - 35 = 1\\) ,' \\(31 + 1 = 32\\) ' Exploring Operation: \\(12 + 32 = 44\\) ,Resulting Numbers: [44] 44.96 unequal:No Solution ... Exploring Operation: \\(4*24 = 96\\) ,Resulting Numbers:[96]" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.611, + 0.359, + 0.625 + ], + "angle": 0, + "content": "Direct Solution Trace" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.633, + 0.82, + 0.864 + ], + "angle": 0, + "content": "Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: 77-22=55, Resulting Numbers: [55, 24, 48, 31, 12, 36, 35] Generated Node #2: [55, 24, 48, 31, 12, 36, 35] from Operation: 77-22=55 Current State: 96:[55, 24, 48, 31, 12, 36, 35], Operations: ['77-22=55'] Exploring Operation: 48-24=24, Resulting Numbers: [55, 24, 31, 12, 36, 35] Generated Node #3: [55, 24, 31, 12, 36, 35] from Operation: 48-24=24 Current State: 96:[55, 24, 31, 12, 36, 35], Operations: ['77-22=55', '48-24=24'] Exploring Operation: 55-24=31, Resulting Numbers: [31, 31, 12, 36, 35] 31,31 equal Current State: 96:[31, 12, 36, 35], Operations: [] Exploring Operation: 35-31=4, Resulting Numbers: [4, 12, 36] Generated Node #2: [4, 12, 36] from Operation: 35-31=4 Current State: 96:[4, 12, 36], Operations: ['35-31=4'] Exploring Operation: 
Figure 16: Example of training data for stacked-CountDown (Appendix F). Top: game setup. Middle: full DFS search trace for training backtracking model. Bottom: correct solution path for training direct solution model.
However, this is not the only strategy for scaling test-time compute: parallel sampling with best-of- $n$ selection provides an alternative that generates diverse solutions simultaneously.
(A) Training backtracking and direct solution models on CountDown and Sudoku reveals task-dependent performance: under equal test-time compute, backtracking (sequential search) underperforms direct solution with best-of- $n$ generation (parallel search) on CountDown, but outperforms it on Sudoku.
whereas in Sudoku, it consistently outperforms the direct solution model.
We use CountDown and Sudoku as controlled testbeds to examine whether backtracking enables efficient test-time scaling. Under a fixed compute budget, backtracking outperforms parallel search in Sudoku but underperforms in CountDown (Fig. 1 A).
(2) Excessive verbosity: Explicit backtracking traces encourage models to produce lengthy reasoning chains without improving reasoning ability. + 

- We demonstrate that reinforcement learning (GRPO) consistently enhances backtracking models by enabling discovery of novel solutions, whereas direct solution models experience mixed outcomes (Fig. 1 B). + 

# 2 Related Work + 

See Appendix B for an extensive review on related work. + 

Scaling test-time compute. Prior work has explored scaling language model performance at test time through parallel or sequential search strategies. Parallel methods rely on independent sampling and selection via heuristics or reward models (Brown et al., 2024; Irvine et al., 2023; Levi, 2024; Xin et al., 2024), while sequential methods refine reasoning step by step using earlier outputs (Hou et al., 2025; Lee et al., 2025). Tree-based methods such as MCTS bridge the two and often incorporate process-level reward models to guide reasoning (Wu et al., 2024; Lightman et al., 2023). Our work contributes to this area by comparing sequential (backtracking) and parallel search under fixed compute budgets. + 

Self-correction and backtracking. Language models can be trained to self-correct through fine-tuning on revision data, synthetic augmentations, or reward-based learning (Saunders et al., 2022; Qu et al., 2024; Welleck et al., 2022). Some approaches also introduce explicit search or separate correction modules to guide revision (Yao et al., 2023b; Havrilla et al., + 

2024). We build on this line of work by studying backtracking as an implicit form of self-correction, analyzing when learning to backtrack helps or hinders reasoning. + 

Reinforcement learning for LLM reasoning. Reinforcement learning has shown promise in enabling language models to autonomously discover reasoning strategies, including through simplified algorithms like GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025). 
While prior work has demonstrated strong results, it remains unclear which model properties enable successful RL-based reasoning (Zelikman et al., 2022; Kazemnejad et al., 2024). Our study addresses this gap by comparing how backtracking and no backtracking models respond to RL fine-tuning, revealing asymmetric benefits. + +# 3 Two strategic games: CountDown and Sudoku + +# 3.1 CountDown + +# 3.1.1 Game setup + +The Game of CountDown has been frequently used as a testbed to study and evaluate LLM reasoning (Gandhi et al., 2024; 2025; Yao et al., 2023a). In a CountDown game, the player is given a set of candidate numbers and a target number (restricted to integers). The goal is to reach the target by applying a sequence of arithmetic operations—addition, subtraction, multiplication, or division—using the candidate numbers. Each number must be used exactly once, and intermediate results can be reused in subsequent operations. + +To algorithmically solve CountDown, we can represent the problem as a search tree (Fig. 2a). Each node in the search tree corresponds to a state defined by the current set of available numbers. At each step, the algorithm selects a pair of numbers from the set and applies one of the four operations, replacing the pair with the resulting value to create a new state. This process continues recursively until the target number is reached (correct leaf node) or all combinations are exhausted (wrong leaf node). In this work, we play the CountDown with four candidate numbers, and for each game, there are 1,152 possible search paths. + +# 3.1.2 Data generation + +We generate backtracking traces with Depth First Search (DFS) with a sum-heuristic (Gandhi et al. (2024), further details in Appendix C.1). We generate a dataset of 500,000 CountDown questions, and the DFS search correctly solves $57\%$ of the questions. The backtracking trace is a serialized version of DFS, listing all the tree nodes visited in the order of DFS traversal. 
To construct the direct solution training data, we prune the backtracking traces to keep only the correct solution path. With the pruning approach, we remove the exploratory parts of the trace while preserving the answer format and scaffolding used in the backtracking model, to ensure a fair comparison. To ensure that the direct solution model does not see more solved CountDown games, we include only the 285,000 questions (i.e., $500,000 \times 0.57$ ) that DFS successfully solves. We provide examples of both training data in Appendix H. + 

# 3.2 Sudoku + 

# 3.2.1 Game setup + 

Sudoku is another prototypical strategic game used to study reasoning and search in LLMs (Yao et al., 2023a; Long, 2023). In this work, we focus on hard $9 \times 9$ Sudoku boards, where only about 20 of the 81 cells are pre-filled, making the search space substantially larger (see Appendix C.1 for a description of Sudoku rules). To algorithmically solve Sudoku, we represent the problem as a search tree (Fig. 2b). Each node corresponds to a partial board state, where some cells have been filled. At each step, the algorithm selects an unfilled cell and fills it with a candidate digit that satisfies Sudoku constraints in the current state. Each valid assignment creates a new child node representing the updated board. The process continues recursively until a complete, valid solution is reached (correct leaf node) or no valid moves remain (wrong leaf node). The depth of the tree corresponds to the number of empty cells, and the branching factor at each node depends on the number of unfilled cells as well as how many digits are valid for each unfilled cell. + 

![](images/6d48ef2be03c9c8fe24eb88ceffb6ab73372724258852629afc0ff253920d49b.jpg) +
Figure 3: Backtracking and direct solution models implement different search strategies for CountDown. For test questions that the model solves correctly, we measure the number of mistakes made (i.e., wrong terminal nodes visited) before finding the correct solution. 
We sort the test questions by number of mistakes made by DFS. Left: Trained on DFS traces, the number of mistakes made by the backtracking model correlates with the DFS. Middle: In contrast, the direct solution model solves a lot more problems with significantly fewer mistakes compared to DFS. Right: For a given number of mistakes made, we examine whether two models solve the same set of question as DFS. Direct solution model implements a search strategy significantly different from DFS. + +![](images/e70f23b00318eb73998084c7cc03a262857c0b2c31e8da7e040b4feba0f83084.jpg) + +![](images/6c35e4c35a8fbc4b9bb884248ec42755727f5cb1a53b8128d768384367b79683.jpg) + +# 3.2.2 Data generation + +We follow the same procedure as CountDown to generate training data for both the backtracking and direct solution models. We use a DFS-based search algorithm, in combination with a Sudoku solver that applies seven common human strategies (e.g., naked singles, hidden pairs and etc, Papadimas. (2023)) to eliminate candidates for unfilled cells. At each node, we use the 7 strategies to eliminate candidates for unfilled cells, and then DFS chooses an unfilled cell, makes a guess and continues solving recursively. This process continues until the board is either solved or reaches a dead-end (i.e., an invalid state with no legal moves). We use a dataset of 3M Sudoku puzzles from (Radcliffe, 2020), and the combined DFS-solver approach successfully solves $98\%$ of them. Since DFS successfully solves nearly all puzzles, we train both models on 2.8M examples and reserve the last 200K for validation and testing. We provide further details on Sudoku training data generation in Appendix C.1 and data examples in Appendix H. + +# 3.3 Model and training + +We use Qwen2.5-style model architectures (Yang et al., 2024) with RoPE positional encoding (Su et al., 2021) and Group Query Attention (GQA) (Ainslie et al., 2023). 
To maximize parameter efficiency, we design custom tokenizers for both games, significantly reducing the size of the language modeling head. This allows us to train smaller models than prior work (Gandhi et al., 2024; Shah et al., 2024) while maintaining comparable performance on both tasks. For CountDown, we use a 17M parameter model with a context length of 4096 tokens; for Sudoku, we use a 38M model with the same context length. See Appendix C.3 for model architecture and an exhaustive list of training hyperparameters. We train all models until validation loss converges (see Appendix G.3). + 

# 4 Empirical trade-offs of backtracking + 

We first demonstrate that backtracking models do not universally outperform the direct solution models (Section 4.1) because backtracking models are restricted to learn a prescribed way of search (Section 4.2). We then identify two factors (Section 4.3) showing how we might improve test-time scaling for backtracking models. + 

# 4.1 Backtracking is not always beneficial + 

Evaluation metrics. We evaluate model performances using solving accuracy on 200 unseen problems with binary scores (either correct or incorrect, no partial credits, see Appendix C.1). We use FLOPs to compare inference costs (see Appendix D for FLOPs computation). For the backtracking model, we allow models to autoregressively generate and measure how many problems the model finds the correct solution at various CoT lengths (ranging from 1024 to 4096 tokens). For the direct solution model, we generate + 

$n$ solutions in parallel through temperature sampling at $T = 0.7$ , and examine whether the model has found the correct solution within $n$ attempts (i.e., best-of- $n$ ). Best-of- $n$ is a suitable choice in those two games, a case where solving the task is hard but verification is trivial. In general, our analysis applies to tasks where verification can be easily done with an external verifier at test-time. 
This is definitely not always the case, and we leave the study of problems where test-time verification is not as easy to future work. In those tasks, one might need to consider majority voting or other strategies. See Appendix E for further discussions. + 

Results. In Fig. 1 A, we observe distinct scaling behaviors for the two models. For both games, the direct solution model's test accuracy scales linearly with increased test-time compute (measured on a logarithmic scale). This scaling behavior indicates that through parallel sampling, the direct solution model generates diverse solutions that search through different solution paths. Conversely, the backtracking model exhibits sub-linear scaling: Longer solution traces disproportionately yield smaller accuracy improvements. We attribute the sub-linear scaling to two causes. First, as reasoning chains become longer, the backtracking model might struggle to effectively track visited states and efficiently search through the solution space. Second, when models perform sequential search, the computation cost grows quadratically with CoT length (due to the attention mechanism, see Appendix D), and this further makes the backtracking model less effective for scaling up test time compute. Overall, for CountDown, the direct solution model consistently outperforms its backtracking counterpart. However, this trend is reversed in Sudoku, where the backtracking model consistently achieves higher accuracy. + 

# 4.2 Backtracking model learns both the good and the bad + 

When teaching a child to correct math mistakes, the child understands that the goal is the correct answer—not making and then fixing errors. Humans have meta-cognitive awareness that models lack. Models trained via next-token prediction simply imitate the traces they see, including making the mistake before fixing it. In CountDown, this poses a key limitation: the backtracking model learns to follow the specific search paths seen in training. 
While some tasks—like shortest path finding—have optimal strategies we can supervise directly (e.g., Dijkstra's algorithm), most reasoning tasks, including CountDown, lack such guarantees. As a result, the model may be constrained by the inefficiencies in the backtracking data. In contrast, the direct solution model, trained only on correct answers, is free to discover more efficient strategies. In our subsequent analysis, we concretely show how the direct solution model successfully bypasses many inefficient search and backtracking steps learned by the backtracking model. + 

# 4.2.1 Backtracking model finds the solution with fewer mistakes + 

Measuring number of mistakes. We compare the number of mistakes made by: (1) DFS (used to generate backtracking data), (2) the backtracking model, and (3) the direct solution model. For DFS and the backtracking model, mistakes are counted as the number of incorrect terminal nodes explored before finding the correct solution. For the direct solution model, mistakes correspond to how many parallel samples ( $n$ in best-of- $n$ ) are needed. + 

Comparing search strategies. We sort the 200 test problems based on mistakes made by DFS and plot mistakes for both models. Fig. 3 left compares DFS search and backtracking model. The number of mistakes made by the backtracking model is correlated with the DFS backtracking trace. This observation is not surprising given that the backtracking model is trained on these traces. However, this result is interesting when we compare it against the direct solution model (Fig. 3 middle). The direct solution model solves most problems within fewer than 10 attempts—far fewer compared to DFS or the backtracking model. Fig. 3 right quantifies these observations. Specifically, for a fixed mistake budget, we use the Jaccard Index to measure whether the model solves a similar set of problems as DFS solves. 
The backtracking model closely mirrors DFS search (high set similarity), whereas the direct solution model diverges significantly (low set similarity). Together with the superior performance of the direct solution model, we conclude that the direct solution model learns more efficient search strategies, avoiding unnecessary explorations of wrong paths. + 

![](images/f744918c025c250a03c698c53abc9f6b439ada25c09467401c31889e902dd09a.jpg) +
Figure 4: Two different variations to improve backtracking model. (a). We hypothesize that the backtracking model can think one step ahead without sacrificing its ability to search. Therefore, we shorten the search trace by skipping the last search step. (b). Two data variations that improve the backtracking model. Mixed-backtrack model trained on a diverse set of search strategies. Think-backtracking model trained on shortened DFS trace. + 

![](images/64eea15ad12a2ff0fc19f7e2b733fda8065aa921d4af04dc08e790c09f82a37d.jpg) + 

# 4.3 Two ways to improve backtracking model + 

Training on a diverse set of search strategies. Our analysis suggests a clear direction for improving the backtracking model: using better search strategies to improve backtracking traces. Beyond DFS, we explored alternatives including Breadth-First Search (BFS) and various heuristic methods (see Appendix G.1). Despite these efforts, no single search strategy significantly outperformed DFS. Inspired by Gandhi et al. (2024), we trained a variant of the backtracking model—mix-backtrack model—using a diverse mixture of BFS and DFS strategies (32 in total), aiming to help the model discover more optimal search patterns. + 

Backtracking model thinks less and talks more. Apart from learning suboptimal search strategies, another inefficiency in the backtracking model is that the model learns to be excessively verbose. Specifically, by requiring the model to explicitly output every step of the DFS, we may prevent it from internalizing part of the reasoning process. 
Concretely, we hypothesize that for CountDown, the model can internally plan at least one step ahead, allowing it to shorten its explicit reasoning trace without losing its ability to perform DFS. To test this hypothesis, we train a variation—the think-backtrack model—on shortened DFS traces, skipping one intermediate step (Fig. 4, A). + 

Mix-strategy results. Fig. 4 (B) compares this mixed-strategy model against the original backtracking and direct solution models. We also include a training data upper bound, representing perfect execution of the mixed search strategies. The mixed-strategy model improves over the original backtracking model and closely approaches its training-data upper bound. However, even with deliberate attempts to optimize search strategies, surpassing the performance of the direct solution model remains challenging. This experiment underscores the inherent difficulty in identifying superior handcrafted search traces. + 

Think-backtrack results. Fig. 4 (B) also compares the performance of the think-backtrack model. By encouraging the model to internalize parts of the reasoning process, the think-backtrack model achieves performances comparable to the direct solution model. This result suggests that models with backtracking ability might produce long but ineffective CoT. By training the model to avoid making the mistakes in the first place, we reduce model verbosity without sacrificing its search capability, in turn improving test-time-compute scaling. As additional evidence, in Appendix G.2, we show that the think-backtrack model solves a superset of test problems solved by the original backtrack model. + 

![](images/9b1b98b03adf16268a74f8dec7aaeb6508fe5869505875ad5daa40391ef2fe85.jpg) +
Figure 5: Different scaling behaviors for backtracking versus direct solution model. CountDown (A). Backtracking model performance does not improve as we scale up model size. (B). The direct solution model improves (C). 
Direct solution model consistently outperforms backtracking model. Sudoku (D, E). Both models' performances improve as we scale up model size. (F). Direct solution model consistently underperforms backtracking model. + 

# 5 Model size and tree depth impact the efficacy of backtracking + 

While we've shown that backtracking might lead to ineffective test-time scaling, other factors also shape its effectiveness. In Section 5.1, we show that backtracking and direct solution models scale differently with model sizes. To explain the contrasting outcomes (Fig. 1 A) between CountDown and Sudoku, in Appendix F, we show that task differences—particularly search tree depth—play a key role: deeper tasks like Sudoku benefit more from backtracking. + 

# 5.1 Dependence on model size + 

We now investigate how model size impacts the performance of backtracking and direct solution models. We evaluate four model scales—3M, 17M, 38M, and 144M—by proportionally increasing the number of attention heads, embedding dimensions, and number of attention layers. Detailed model configurations can be found in Appendix C.2. + 

CountDown. Scaling up model size improves the performance of the direct solution model (Fig. 5 B) across all test-time-compute budgets. When trained exclusively on correct solutions, larger models can independently discover highly effective search strategies. In contrast, the backtracking model shows no improvements with increased model sizes (Fig. 5 A). The lack of improvement from model scaling can be explained by the training data: The performance of the backtracking model is constrained by the quality of the backtracking traces used for training. As previously seen in Fig. 4 (right), the 17M backtracking model is already approaching the performance ceiling that is set by the training data. Training larger models on the same backtracking data would not lead to further performance improvements. 
Due + 

to different scaling behaviors between backtracking and direct solution models, the gap in performances between two types of models widens with increasing model sizes (Fig. 5 C). + 

Sudoku. Similar to CountDown, the performances of direct solution models improve with increased model sizes (Fig. 5 E). Unlike CountDown, however, the backtracking model also significantly benefits from scaling (Fig. 5 D). This difference can again be explained by examining the backtracking training data. Sudoku is inherently more complex than CountDown. The DFS backtracking traces successfully solve $97\%$ of test boards—far exceeding the current performance of all four tested model sizes. Because the backtracking model for Sudoku has not yet reached the training data performance ceiling, increased model capacity leads to improved results. On the other hand, due to the complexity and large search space of the game, the backtracking models' performance gains start to diminish as the search traces become longer. As a result, the backtracking model consistently outperforms the direct solution model across scales, but the advantage diminishes at larger compute budgets (Fig. 5 F). + 

# 6 GRPO: Learning beyond the imitation game + 

So far, we have shown that under supervised learning, backtracking is not always optimal for scaling test-time compute. We now explore how further training both backtracking and direct solution models with reinforcement learning leads to qualitatively different outcomes. + 

# 6.1 Continue training models with GRPO + 

Recently, RL has become a popular approach to further enhance LLMs performance on challenging benchmarks such as MATH (Hendrycks et al., 2021) and AIME (AIME, 2024). Here, we study the effects of RL in a controlled setting, focusing on how it impacts a model's backtracking behaviors (sequential search) and as well as a model's parallel search capability (sampling with best-of- $n$ ). 
We take the CountDown backtracking and direct solution models, which have been trained to convergence under the supervised learning objective (see Appendix G.3 for training curves). We then continue training each model using GRPO (Shao et al., 2024), following verl's (Sheng et al., 2024) implementation. We perform GRPO on the same training data used for the supervised learning. As before, we evaluate performance across different test-time compute budgets. + 

# 6.2 Backtracking model discovers new search strategies + 

Figure 1 C shows that the backtracking model post GRPO sees a performance boost across all test-compute budgets. The post-GRPO model (dark red) reaches an accuracy comparable to the pre-GRPO direct solution model (light blue). This improvement is surprising for two reasons: (1) at maximum compute (4096 tokens), the model solves nearly $70\%$ of the test set—exceeding the performance of the DFS strategy used to generate training data $(57\%)$ ; and (2) the model was trained on questions it has already seen during supervised learning, with no new problems introduced during GRPO. + 

These gains suggest that the backtracking model, once freed from the constraints of predicting the next token on DFS traces, can now discover better search strategies. To concretely show that the backtracking model post-GRPO learns search strategies different from the DFS training traces, we revisit the mistake-counting analysis from Section 4.2.1 (Figure 3). For each test problem, we compute the number of mistakes as before (i.e., counting how many incorrect terminal nodes are explored before reaching a correct solution). Using the same set similarity measure as before, we quantify the strategy deviation in Figure 6 (left). The smaller Jaccard index values confirm that through GRPO, the backtracking model has learned new and more effective search behaviors. In Appendix G.4, we also show the per-problem scatter plot as done in Figure 3. 


# 6.3 Direct solution model specializes at pass@1 + 

We now show that compared to backtracking models, GRPO has remarkably different effects on direct solution models. As shown in Figure 1 C, the direct solution model post-GRPO achieves strong performance at the smallest compute budget (pass@1), solving $42.5\%$ of unseen CountDown puzzles (82 out of 200 test problems). None of the handcrafted search strategies (Appendix G.1) can reach such high accuracy. To understand the impressive gain + 

![](images/74d0920a8d16477d6e33f418d0398f280da866ebfc2cffcf91324a04375b76a2.jpg) +
Figure 6: GRPO has a different effect on backtracking versus direct solution model. Left: After GRPO, the backtracking model's search strategy starts to deviate away from the DFS search. Right: For problems where the pre-GRPO direct solution model (blue) has non-zero pass@k solving probabilities, the post-GRPO direct solution model (red) solves with pass@1. + 

![](images/58fd3a6281a1bb9dfa15bece1a73c9939603a56ca0f044b45ec84822d61fe0f3.jpg) + 

on 1-shot performance, we examine those 82 problems, and discover that the pre-GRPO direct solution model was able to find the correct solution by sampling best-of- $n$ (with $n \leq 64$ ). We now examine a model's solving probabilities (i.e., measuring pass@k rate out of the 64 generations). We compare the pass@k rate for the direct solution model pre and post GRPO, shown in Figure 6, right. We rank the 200 test problems by the pre-GRPO model's solving probabilities. For problems that the pre-GRPO model has a non-zero pass@k rate, the post-GRPO model can solve most of them with pass@1. + 

However, this improvement in 1-shot performance comes with a substantial trade-off: the model loses its ability to generate diverse solutions. As a result, when we perform parallel search using best-of- $n$ , the direct solution model post-GRPO fails to explore different solution paths, hurting its test-time-scaling effectiveness. 
Therefore, test-time compute scaling becomes ineffective as we increase compute budgets, forming a sharp contrast to the backtracking model's consistent improvements across the full compute budget. + 

# 7 Conclusion and discussions + 

In this work, we conducted a controlled empirical investigation into the efficacy of teaching backtracking to large language models (LLMs) as a method for scaling test-time computation. Using two strategic games, CountDown and Sudoku, we demonstrated that backtracking does not universally outperform parallel solution strategies; rather, its effectiveness depends significantly on task characteristics, model scale, and training approach. In Appendix A, we show that our results in the synthetic setting generalize: even in real-world reasoning tasks, backtracking is not always beneficial. Additionally, our reinforcement learning experiments uncovered a unique synergy between backtracking capabilities and RL-based training, enabling models to discover novel strategies. + 

Limitations and future work. While our experiments relied on two strategic games (CountDown and Sudoku) and models trained from scratch—common practices for controlled studies—an important avenue for future research is extending our findings to complex, real-world reasoning tasks such as coding and mathematical problem-solving. For future work, developing precise ways to characterize tasks that benefit from backtracking will be valuable for guiding model training. Finally, while we intentionally created a dichotomy between pure backtracking and direct-solution models, real-world applications may require hybrid strategies that dynamically choose between direct generation and explicit search based on problem complexity. Investigating whether LLMs can autonomously optimize their reasoning modes, particularly through reinforcement learning paradigms, is a promising future direction for improving the flexibility and efficiency of model reasoning. 
+ +# Acknowledgments + +We thank Core Francisco Park and Bingbin Liu for helpful discussions and feedback throughout the development of this work. TQ and DAM acknowledge support from the Kempner Institute, the Aramont Fellowship Fund, and the FAS Dean's Competitive Fund for Promising Scholarship. + +# References + +AIME. American invitational mathematics examination, 2024. URL https://maa.org/math-competitions/american-invitational-mathematics-examination-aime. +Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebrón, and Sumit Sanghai. GQA: Training generalized multi-query transformer models from multi-head checkpoints. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.13245. +Afra Feyza Akyurek, Ekin Akyurek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. RL4F: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.08844. +Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan D Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2408.11791. +Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.21787. +Sehyun Choi, Tianqing Fang, Zhaowei Wang, and Yangqiu Song. KCTS: Knowledge-constrained tree search decoding with token-level hallucination detection. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2310.09044. 
+DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z F Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J L Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R J Chen, R L Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S S Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W L Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X Q Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y K Li, Y Q Wang, Y X Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong. Ying He,Yishi Piao,Yisong Wang,Yixuan Tan,Yiyang Ma,Yiyuan Liu,Yongqiang Guo. Yuan OuYuduan Wang Yue Gong.Yuheng ZouYujia HeYunfan XiongYuxiang Luo. 
Yuxiang YouYuxuan LiuYuyang ZhouYX ZhuYanhong Xu,Yanping Huang,Yaohui Li,Yi Zheng,Yuchen Zhu,Yunxian Ma,Ying Tang,Yukun Zha,Yuting YanZ Z Ren Zehui Ren,Zhangli Sha,Zhe Fu,Zhean XuZhenda XieZhengyan Zhang,Zhenwen Hao. Zhicheng Ma,Zhigang Yan,Zhiyu WuZihui GuZijia ZhuZijun LiuZilin LiZiwei Xie + +Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang, and Zhen Zhang. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.12948. +Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D Goodman. Stream of search (SoS): Learning to search in language. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2404.03683. +Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective STaRs. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2503.01307. +Alex Havrilla, Sharath Raparthy, Christoforus Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLoRe: When, where, and how to improve LLM reasoning via global and local refinements. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.10963. +Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. Measuring mathematical problem solving with the MATH dataset. arXiv [cs.LG], 2021. URL http://arxiv.org/abs/2103.03874. +Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling. arXiv [cs.LG], 2025. URL http://arxiv.org/abs/2501.11651. +Robert Irvine, Douglas Boubert, Vyas Raina, Adrian Liusie, Ziyi Zhu, Vineet Mudupalli, Aliaksei Korshuk, Zongyi Liu, Fritz Cremer, Valentin Assassi, Christie-Carol Beauchamp, Xiaoding Lu, Thomas Rialan, and William Beauchamp. 
Rewarding chatbots for real-world engagement with millions of users. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2303.06135. +Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. VinePPO: Unlocking RL potential for LLM reasoning through refined credit assignment. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2410.01679. +Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal Behbahani, and Aleksandra Faust. Training language models to self-correct via reinforcement learning. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.12917. +Ariel N Lee, Cole J Hunter, and Nataniel Ruiz. Platypus: Quick, cheap, and powerful refinement of LLMs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.07317. +Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper LLM thinking. arXiv [cs.AI], 2025. URL http://arxiv.org/abs/2501.09891. +Noam Levi. A simple model of inference scaling laws. arXiv [stat.ML], 2024. URL http://arxiv.org/abs/2410.16377. +Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2305.20050. +Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2309.15028. +Jieyi Long. Large language model guided tree-of-thought. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2305.08291. 
+ +Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2406.06592. +Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.19393. +Aleksei Maslakov And Papadimas. Sudoku solver with step-by-step guidance, 2023. URL https://github.com/unmade/dokusan, 2023. +Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2304.01904. +Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive IntroSpEction: Teaching language model agents how to self-improve. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.18219. +David Radcliffe. 3 million crossword puzzles with ratings. Website, 2020. URL https://www.kaggle.com/datasets/radcliffe/3-million-sudoku-puzzles-with-ratings. +William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2206.05802. +Kulin Shah, Nishanth Dikkala, Xin Wang, and Rina Panigrahy. Causal language modeling can elicit search and reasoning capabilities on logic puzzles. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.10502. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, Y K Li, Y Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.03300. 
+Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. HybridFlow: A flexible and efficient RLHF framework. arXiv preprint arXiv: 2409.19256, 2024. +Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. arXiv [cs.CL], 2021. URL http://arxiv.org/abs/2104.09864. +Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9426-9439, Stroudsburg, PA, USA, 2024. Association for Computational Linguistics. URL http://dx.doi.org/10.18653/v1/2024.acl-long.510. +Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.04592. +Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2211.00053. +Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv [cs.AI], 2024. URL http://arxiv.org/abs/2408.00724. + +Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.00633. +Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. DeepSeek-prover: Advancing theorem proving in LLMs through large-scale synthetic data. arXiv [cs.AI], 2024. 
URL http://arxiv.org/abs/2405.14333. +An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. +Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. arXiv [cs.CL], 2023a. URL http://arxiv.org/abs/2305.10601. +Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, Ran Xu, Phil Mui, Huan Wang, Caiming Xiong, and Silvio Savarese. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv [cs.CL], 2023b. URL http://arxiv.org/abs/2308.02151. +Seonghyeon Ye, Yongrae Jo, Doyoung Kim, Sungdong Kim, Hyeonbin Hwang, and Minjoon Seo. SelFee: Iterative self-revising LLM empowered by self-feedback generation. Blog post, 2023. URL https://kaistai.github.io/SelFee/. +Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah D Goodman. STaR: Bootstrapping reasoning with reasoning. arXiv [cs.LG], 2022. URL http://arxiv.org/abs/2203.14465. +Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B Tenenbaum, and Chuang Gan. Planning with large language models for code generation. arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2303.05510. +Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2404.17140. 
+Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning acting and planning in language models. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2310.04406. + +![](images/c3231bcd3f7a4377e0bb50797ec0d78ce075ede1cd66eeda2dce0e11b6a62c45.jpg) +Math-500 Performance vs. Test Compute + +![](images/90573dfe71e77f9da3d2327c2e68b2211bc164de4f3632147e1b10f7612d2631.jpg) +GPQA Performance vs. Test Compute +Figure 7: Evaluating backtracking on real LLMs. Left: On MATH-500, we compare the S1 model (fine-tuned on backtracking traces) using sequential decoding with budget forcing, against its base model (Qwen2.5-32B-Instruct) using parallel sampling. The backtracking model underperforms at low compute but narrows the gap at higher budgets. Right: On GPQA, the same backtracking setup outperforms parallel sampling in a multiple-choice reasoning setting. This comparison generalizes our conclusion from synthetic settings to real LLMs. + +# A Backtracking Analysis on Math Reasoning with LLMs + +# A.1 Experimental Setup + +To complement our synthetic experiments, we conduct an evaluation on real-world math problems to examine whether backtracking remains effective under equal test-time compute. We compare two approaches: + +- Backtracking model: fine-tuned on solution traces that include explicit self-correction and step-by-step reflection. +- Direct solution model: the base model without backtracking fine-tuning, using parallel sampling (with majority voting for final correct answer) at inference. + +To control test-time compute, we use the budget forcing technique introduced in (Muen-nighoff et al., 2025). This enables a fair comparison across models with differing reasoning styles. + +Backtracking Model. We adopt the S1 checkpoint from Muennighoff et al. (2025), a model trained on solution traces distilled from DeepSeekR1. 
These traces exhibit explicit backtracking behaviors—identifying and correcting earlier mistakes. We generate outputs with temperature $T = 0.7$ under budget forcing and evaluate on the MATH-500. + +Direct Solution Model. For fair comparison, we use the same base model as S1—Qwen2.5-32B-Instruct—without backtracking fine-tuning. We sample $N = 1$ to 8 completions with temperature $T = 0.7$ , and report both Best-of- $N$ and Majority Vote accuracy. + +# Results and Interpretation + +Figure 7 (left) presents accuracy under matched compute budgets. We observe that at low compute budgets the backtracking model underperforms due to its verbose reasoning traces. At higher budgets, backtracking matches and slightly exceeds the performance of parallel sampling. This mirrors trends observed in the CountDown (Section 4.3), and suggests that while backtracking introduces overhead, it yields benefits when sufficient compute is available. + +To form a sharp contrast, we reproduce results from (Muennighoff et al., 2025) on GPQA-Diamond (Figure 7, right), which shows that the same backtracking model significantly outperforms parallel sampling—even at lower budgets—in a multiple-choice setting. This contrast highlights that the effectiveness of backtracking is task-dependent. + +This real-world evaluation supports our synthetic findings: backtracking improves performance under compute constraints, but its advantage depends on the task structure. On open-ended math problems, the benefit is most pronounced at higher budgets. On structured tasks like multiple-choice QA, gains can appear even earlier. Overall, our conclusions generalize beyond synthetic settings. + +# B Related Work Extended + +# B.1 Test-time computation scaling + +A growing body of work has explored how to improve language model performance by scaling test-time computation. These approaches typically fall into two broad categories: parallel and sequential search. 
Parallel methods sample multiple solutions independently and select the best one using predefined criteria—such as majority voting or external reward models—as seen in Best-of- $N$ techniques (Brown et al., 2024; Irvine et al., 2023; Levi, 2024). These methods often rely on outcome-based reward models that score complete solutions (Xin et al., 2024; Ankner et al., 2024). + +In contrast, sequential methods iteratively refine reasoning by conditioning on previous attempts. This class includes stepwise improvement methods(Ankner et al., 2024; Hou et al., 2025; Lee et al., 2025), where each new trajectory builds on earlier outputs, enabling the model to adapt its reasoning dynamically. Other research works have also explored using the search process itself to improve model reasoning capabilities, either during inference or by integrating the feedback into training(Wang et al., 2024; Luo et al., 2024). While these methods can reduce redundancy, they typically require more compute per sample and may suffer from compounding errors. + +Tree-based approaches, such as Monte Carlo Tree Search (MCTS) and guided beam search, represent a hybrid between parallel and sequential strategies(Gandhi et al., 2024; Liu et al., 2023; Zhang et al., 2023; Zhou et al., 2023; Choi et al., 2023; Xie et al., 2023). These methods often leverage process reward models, which assign value to intermediate reasoning steps rather than full outputs(Lightman et al., 2023; Wang et al., 2024; Wu et al., 2024). REBASE(Wu et al., 2024), for example, uses a process reward model to guide exploration and pruning in tree search, and has been shown to outperform both sampling-based methods and traditional MCTS. + +# B.2 Self-correction and backtracking + +Search and backtracking are inherently tied to self-correction, as they enable models to revisit earlier decisions and recover from errors—a critical capability for multi-step reasoning. 
Teaching language models to self-correct has been approached through fine-tuning on revision demonstrations from humans or stronger models(Saunders et al., 2022; Ye et al., 2023; Qu et al., 2024), as well as through synthetic data generation and handcrafted augmentation(Paul et al., 2023; Wang et al., 2023; Lee et al., 2023). Reward-based methods provide another avenue, using outcome- or process-level signals to differentiate good and bad reasoning trajectories, often framed as implicit policy learning(Welleck et al., 2022; Akyurek et al., 2023; Zhang et al., 2024). Some methods further incorporate search, critique generation, or separate correction modules to enhance reasoning quality(Yao et al., 2023b; Havrilla et al., 2024). In contrast, using two structured games, we investigate the tradeoffs of teaching models to backtrack via search traces versus allowing them to learn purely from correct solutions. + +# B.3 Reinforcement learning for LLM reasoning + +Reinforcement learning (RL) has emerged as a powerful framework for improving the reasoning abilities of language models. While early work applied off-policy and on-policy RL methods to guide models toward verifiable outcomes(Zelikman et al., 2022; Kazemnejad et al., 2024), recent approaches have shown that even simplified algorithms like GRPO can lead to significant performance gains and the emergence of in-context search behavior (DeepSeek-AI et al., 2025; Shao et al., 2024; DeepSeek-AI et al., 2025). These advances suggest that RL can help models autonomously discover more effective reasoning strategies, even without explicit reward models or structured search. However, not all models benefit + +equally from RL, and it remains unclear what properties make a model amenable to learning through reinforcement. Our work contributes to this question by examining how backtracking models, when trained with GRPO, can discover novel solution strategies—while no-backtracking models show limited or mixed gains. 
+ +# C Experiment details + +# C.1 Additional details on game, data generation + +CountDown tree size computation. CountDown has an exponentially growing search space with respect to the number of candidate numbers. If the current state has $N$ available numbers, there are $\binom{N}{2} \times 4$ possible actions (selecting a pair and one of four operations), and the depth of the tree is $N - 1$ . For games with four candidate numbers, the complete search tree contains 1,152 nodes. + +CountDown search. To generate DFS search data, we use a sum heuristic to guide the search order and prune nodes. This heuristic measures the distance between the sum of all input numbers and the target number, and prunes a node if the absolute distance exceeds the target. This approach is inspired by Gandhi et al. (2024), who also consider an alternative—the multiply heuristic—which measures the minimum distance between the input set and the factors of the target. However, in our experiments, both heuristics yield similar performance: for a fixed number of visited nodes, DFS with either heuristic solves approximately the same number of games. + +Sudoku rule. In a Sudoku game, the player is given a $9 \times 9$ grid in which each cell must be filled with a digit from 1 to 9. The puzzle is subject to three constraints: each row, each column, and each of the nine $3 \times 3$ subgrids must contain all digits from 1 to 9 exactly once. Given a partially filled grid, the objective is to fill in the remaining cells such that all constraints are satisfied. + +Sudoku data and tokenization. To represent theiku board for language models, we encode each cell as a position-value pair: $(x,y) = v$ , where $(x,y)$ denotes the grid location and $v$ is the cell's value. The model receives the initial board as a list of known $(x,y) = v$ pairs and generates the solution by predicting the values for the remaining cells. We generate backtracking traces by serializing the full DFS traversal. 
For the direct solution model, we prune each trace to include only the final solution path. + +Scoring. For CountDown, a solution is correct only if it adheres to game rules and correctly achieves the target number. For Selenium, correctness requires fully solving the board, with no partial credit given for incomplete but correct boards. Models are tested on 200 unseen problems per game. The same scoring function is used as the reward function in GRPO (Section 6) + +# C.2 Additional details on model architecture + +Model hyperparameters can be found in Table 1. + +
| Model Size | Hidden Size | Layers | Attn Heads | Intermediate Size | KV Heads |
| --- | --- | --- | --- | --- | --- |
| 3M | 256 | 6 | 4 | 512 | 1 |
| 17M | 512 | 8 | 4 | 1024 | 1 |
| 38M | 512 | 10 | 8 | 2048 | 2 |
| 144M | 1024 | 12 | 8 | 3072 | 2 |
+ +Table 1: Qwen2.5-style architecture configurations for the four model sizes used in our experiments. + +# C.3 Training hyperparameter + +Training hyperparameters can be found in Table 2. We train all models on 2 NVIDIA H100 80GB HBM3 GPUs. + +
| Hyperparameter | Value |
| --- | --- |
| **Optimization** | |
| Learning rate | $1 \times 10^{-5}$ |
| Weight decay | 0.01 |
| **Learning Rate Schedule** | |
| Scheduler type | Cosine |
| Warmup steps | 1 |
| **Training Setup** | |
| Epochs | 30 |
| Batch size (backtracking model) | 32 |
| Batch size (direct solution model) | 64 |
| Context length (backtracking model) | 4096 |
| Context length (direct solution model) | 512 |
| **Tokenizer** | |
| Tokenizer size (CountDown) | 74 |
| Tokenizer size (Sudoku) | 110 |
+ +Table 2: Training hyperparameters used for all experiments. Batch size and context length vary based on model type. + +# D FLOP computation + +To compare backtracking and direct solution models under a fixed compute budget, we estimate inference FLOPs based on model architecture and generation length $T$ . We use a simplified transformer FLOP computation that accounts for per-token operations across all layers. + +Below is a list of architectural and generation parameters: + +- $d_{\mathrm{model}}$ : hidden dimension +$d_{\mathrm{kv}}$ : key/value dimension +- $d_{\mathrm{ff}}$ : intermediate (feedforward) dimension +- $L$ : number of layers +- $T$ : number of generated tokens (i.e., context length) +- $N$ : number of sequences generated (e.g., in best-of- $N$ sampling) + +# D.1 Step-by-step FLOPs Calculation + +1. Per-layer linear FLOPs per token. We break down the linear FLOPs for each transformer layer into attention and MLP components: + +# Self-attention: + +- Query projection: $d_{\mathrm{model}} \times d_{\mathrm{model}}$ +- Key projection: $d_{\mathrm{model}} \times d_{\mathrm{kV}}$ +- Value projection: $d_{\mathrm{model}} \times d_{\mathrm{kv}}$ +- Output projection: $d_{\mathrm{model}} \times d_{\mathrm{model}}$ + +This results in a total of: + +$$ +\mathrm {F L O P s} _ {\text {a t t e n t i o n - l i n e a r}} = 2 d _ {\text {m o d e l}} ^ {2} + 2 d _ {\text {m o d e l}} d _ {\mathrm {k v}} +$$ + +# MLP (Feedforward): + +MLP layers include following components: + +- Gate projection +- Up projection +- Down projection + +Each of these MLP layers costs: $d_{\mathrm{model}} \times d_{\mathrm{ff}}$ , giving: + +$$ +\mathrm {F L O P s} _ {\mathrm {m l p}} = 3 d _ {\mathrm {m o d e l}} d _ {\mathrm {f f}} +$$ + +Combining both components, the total per-token linear cost per layer is: + +$$ +\mathrm {F L O P s} _ {\mathrm {l i n e a r}} = 2 d _ {\mathrm {m o d e l}} ^ {2} + 2 d _ {\mathrm {m o d e l}} d _ {\mathrm {k v}} + 3 d _ {\mathrm {m o d e l}} d _ 
{\mathrm {f f}} +$$ + +2. Quadratic attention cost. Self-attention involves computing interactions between all token pairs, resulting in a quadratic cost: + +$$ +\mathrm {F L O P s} _ {\text {a t t e n t i o n}} = d _ {\text {m o d e l}} \cdot \frac {T (T + 1)}{2} +$$ + +3. Total generation cost per sequence. Each token attends to all previous tokens across all $L$ layers. The generation cost for a single sequence is: + +$$ +\mathrm {F L O P s} _ {\mathrm {g e n}} = L \cdot \left(\mathrm {F L O P s} _ {\mathrm {l i n e a r}} \cdot T + \mathrm {F L O P s} _ {\mathrm {a t t e n t i o n}}\right) +$$ + +4. Total inference FLOPs. For $N$ sequences (e.g., best-of- $N$ sampling), the total inference cost is: + +$$ +\mathrm {F L O P s} _ {\text {t o t a l}} = N \cdot \mathrm {F L O P s} _ {\text {g e n}} +$$ + +We do not include auxiliary operations such as token embedding and softmax, weight norm, as their contribution is negligible compared to the transformer layers. All FLOPs reported in our experiments use this formula, with model configurations listed in Table 1. + +# E Majority voting versus best-of-n + +In this work, we primarily use the best-of- $n$ metric to evaluate the direct solution model. This metric is suitable for tasks where verifying the correctness of a solution is trivial, whereas solving the task itself is challenging. Many real-world problems, such as coding tasks and combinatorial optimization, fall into this category. Conversely, for problems where verification is difficult, metrics such as majority voting may be more appropriate. + +To illustrate this point, we additionally evaluate the CountDown direct solution model using both metrics in Figure 8. For majority voting, we generate n solutions per test problem, select the most frequently occurring solution (breaking ties randomly), and evaluate its correctness. 
+ +We find that the majority-voting performance closely approximates the direct solution model's one-shot accuracy (i.e., best-of- $n$ with $n = 1$ ). However, majority voting is less suitable for our task for several reasons. First, the CountDown game frequently has multiple correct solutions, so selecting the majority solution path can fail to detect cases where the model generates different but equally valid solutions. Second, while majority voting is appropriate in real-world LLM scenarios—such as mathematical reasoning—where distinct solution paths converge to the same final boxed answer, in our synthetic setting, where models are trained from scratch, majority voting essentially becomes a noisy proxy for greedy decoding (sampling at temperature $T = 0$ ). Thus, we expect and observe majority voting accuracy to closely track pass@1 accuracy. + +In summary, given the characteristics of our task and the controlled experimental setup, best-of- $n$ remains a valid and preferred metric for evaluating direct solution models. + +# F Dependence on depth of the search tree + +# F.1 Search tree depth + +Why do backtracking models perform well on Selenium but underperform on CountDown, even when both are trained on DFS search traces? We argue that task characteristics—particularly those beyond our control in real—world settings—play a key role in determining whether backtracking is test-time-compute-efficient. A major difference between the two games lies in the depth of their search trees (Figure 2). In hard Selenium puzzles, only 20 out of 81 cells are pre-filled, leaving 50–60 cells to solve. This results in deep search trees with + +![](images/932547a1aae0442be0b8ed55b64d894efab339f8a1e422c141b2a42aeb51fdfc.jpg) +Figure 8: Majority voting versus best-of- $n$ for CountDown direct solution model. For CountDown, verification is much easier than solving the problem. Therefore, best-of- $n$ as a performance is justified. 
Additionally, we also examine majority voting performance. However, CountDown solutions are not unique, majority voting is not the most suitable way to measure model performances. + +![](images/c0806c8c2a5e96f1f7a0026e9c74be9031e96165b9ef739ed5759bc84f66787e.jpg) +Figure 9: The efficacy of backtracking depends on search tree depth. Left: We introduce a variation of the CountDown game—stacked CountDown—to increase the search tree depth. In the original CountDown game (solid bars), the direct solution model consistently outperforms the backtracking model, shown by a positive performance gap. In the stacked version (slanted bars), this gap is significantly reduced or even reversed, indicating that backtracking becomes more compute-efficient at greater depths. Right: We introduce a variation of Sudoku—easy Sudoku—where the initial board has more pre-filled cells to decrease the search tree depth. In the original setting, the 38M direct solution model (bottom, solid bars) underperforms the backtracking model. In the shallow Sudoku variant (slanted bars), the performance gap narrows across compute budgets. For the 17M models (top), the results are less conclusive. + +![](images/f6b5c72489b87d26c48b44d4f55f9ca147976d6fd1d61acc75e1b1ed0f2b685f.jpg) + +extensive trial-and-error, with many backtracking steps. In contrast, CountDown (in our setup) uses 4 candidate numbers, limiting the search tree depth to just 3. We hypothesize that backtracking models excels at tasks with deeper search trees, while shallow trees make parallel strategies (i.e., direct solution model) more effective. To test this, we design a variant of CountDown with increased search depth and a variant of Sudoku with reduced depth. + +# F.2 A deeper CountDown + +Set up To increase the search tree depth in CountDown, one might naively scale up the number of candidate numbers. 
However, this approach quickly leads to exponential growth in tree width: with 4 candidates, the tree contains 1,152 nodes; with 5 candidates, it grows to 46,080. To prevent the exponential growth in the number of search paths, we design a stacked CountDown variant that increases depth while controlling tree width. In this setup, the player is given 8 candidate numbers and a final target. The first 4 numbers must be used + +to reach the 5th number ("a partial goal"), and the remaining 4 numbers must then be used to reach the final target. This effectively stacks two CountDown problems, increasing depth without combinatorial explosion. We generate training data for both backtracking and no-backtracking models following the same procedure as in Section 3.1.2, with examples provided in Appendix H (Figure 16). We train a 17M as well as a 38M model until validation loss has converged, and test on 200 unseen problems. + +Results In Figure 9 (left), we compare the performance gap between the direct solution model and the backtracking model, measured by the difference in test accuracy. In the original CountDown setting (solid bars), the direct solution model consistently outperforms the backtracking model across all test compute budgets. However, in the stacked CountDown variant (slanted bars), the performance gap narrows significantly—and in some cases, reverses. The sign reverse indicates the backtracking model now outperforms the direct solution model. These results support our hypothesis: in CountDown, backtracking becomes more compute-efficient as the search tree depth increases. We observe this trend across both 17M and 38M models. + +# F.3 A shallower Selenium + +Set up To reduce the search tree depth in Selenium, we generate easier boards by increasing the number of initially filled cells. Specifically, we take the original 3M Selenium dataset Radcliffe (2020) and apply the direct solution model (Section 3.2.2) to correctly fill 10 additional cells. 
This increases the average number of pre-filled cells from 20 to around 30, effectively decreasing search tree depth. We generate both backtracking and direct solution training data following the same procedure in Section 3.2.2. Models with 17M and 38M parameters are trained to convergence and evaluated on 200 unseen problems. + +Results In Figure 9 (right), we show the performance gap between the direct solution and backtracking models, measured by the difference in test accuracy. In the original (hard) Sudoku setting, the 38M direct solution model consistently underperforms the backtracking model, as indicated by the negative gaps (solid green bars). In the shallow-Sudoku variant (slanted bars), these gaps are reduced across all test-time compute budgets for the 38M model. The trend is less clear for the 17M model, where the performance difference remains small in both settings. Overall, these results support our hypothesis: in Sudoku, backtracking becomes more test-time-compute-efficient when the search tree is deeper. + +# G Additional results + +# G.1 Exploring different CountDown strategies + +We analyze different search strategies for CountDown, including DFS and BFS with varying beam widths. For each strategy, we tokenize the resulting backtracking trace and measure number of tokens used in each search trace. The goal is to identify which strategy that finds correct solutions with the fewest tokens (Figure 10). The results show no clear winner. BFS with a smaller beam width produces shorter traces by exploring fewer nodes, but this comes at the cost of missing correct solutions more frequently. Increasing the beam width improves solution coverage but leads to longer traces due to broader exploration. + +In contrast, DFS produces more uniformly distributed trace lengths but suffers from a specific failure mode: it may prune the correct path early and terminate prematurely. 
These failures appear as short but incorrect traces, visible as the left-most orange bars in Figure 10 (bottom). + +# G.2 Compare think-backtrack and backtrack + +Table 3 further shows a confusion matrix comparing the original and think-backtrack models. The backtracking model solves 102 test problems in total with maximum test-time compute budget (4096 tokens). Out of those 102 problems, the think-backtrack model solves most of them. This evidence further shows that by training on shortened search traces, the model learns to internalize parts of its thinking without sacrificing performances. + +![](images/fc56edd9b4072e1423ef98a3d12b83f40442c91276c3388b7c61b2accbcc2e7f.jpg) +Figure 10: Examine different search strategies for CountDown. Beyond DFS, we experiment with Bread-First-Search (BFS) with different beam widths. We tokenize the search trace and measure the number of tokens as search length. There is not one search algorithm that is optimal to generate both short and correct solution traces. + +
| | T-B Correct | T-B Incorrect |
| --- | --- | --- |
| B Correct | 83 | 19 |
| B Incorrect | 41 | 57 |
+ +Table 3: Confusion matrix between Think-Backtrack (T-B) and Backtrack (B) models. + +# G.3 Supervised learning training curve + +During training, we set the maximum epochs to 30 epochs and allow early stopping. All models converge before 30 epochs and we early stop training when the validation loss has converged on log-log scale. Figure 11, 12 show the training curve for both models and for CountDown and Selenium. + +# G.4 Additional GRPO plots + +In Figure 3 (Section 4.2.1), we used the number of mistakes as a proxy for comparing search strategies. To further demonstrate that the backtracking model fine-tuned with GRPO discovers new strategies, we repeat the same analysis in Figure 13 (right). Compared to the original backtracking model (Figure 13, left), the post-GRPO model solves many problems with a different number of mistakes than the number of mistakes made by DFS. This shift indicates that the model is no longer tightly aligned with the original search trace and has discovered alternative, more diverse solution paths. Figure 6 (left) quantifies the above qualitative observation. + +# H Data sample + +Figure 14 shows an example of a CountDown game and the training data. Figure 15 shows an example of a Sudoku game and the training data. Figure 16 shows an example of stacked-CountDown variation and the training data. + +![](images/70e27d83371e2162ae2ad3312e8604d72f509e110ba9bf9f581d4ee15f31c6b6.jpg) + +![](images/a7f0cb936326cfeac7c67d4002567f49d6d2af471738d349c7d0ed30b32045bb.jpg) + +![](images/8eb41fd414170e86a8c76ead06fd22df26d60ebd1999e2cf7e9f99d3edb8492a.jpg) +Figure 11: CountDown validation loss. Left: Backtracking model. Right: Direct solution model. + +![](images/ae64856c00c49718b08575467901ac63bde0a1130a794f68a3fc2292876c436c.jpg) +Figure 12: Sodomu validation loss. Left: Backtracking model. Right: Direct solution model. 
+ +![](images/926acdc15b5f62a9030e2b341cc566625d663dc62c206a1cfe0ddd4f77598eea.jpg) +Figure 13: Backtracking model can discover some new search strategies. Post GRPO, the backtracking model discover new strategies: In the right panel, for each problem, the post-GRPO model makes a different number of mistakes compared to DFS trace. + +![](images/e36fe3888388f8faacc648f6647e91131e8f15d326a7c7ed9d7a8e014287a021.jpg) + +# Example of a CountDown game + +Target: 16 Candidates: [96, 11, 78, 22] + +# DFS Search Trace + +```txt +Current State: 16:[96, 11, 78, 22], Operations: [ ] +Exploring Operation: 96-78=18, Resulting Numbers: [11, 22, 18] +Generated Node #0,0: 16:[11, 22, 18] Operation: 96-78=18 +Moving to Node #0,0 +Current State: 16:[11, 22, 18], Operations: ['96-78=18'] +Exploring Operation: 22-11=11, Resulting Numbers: [18, 11] +Generated Node #0,0,0: 16:[18, 11] Operation: 22-11=11 +Moving to Node #0,0,0 +Current State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] +Exploring Operation: 18-11=7, Resulting Numbers: [7] +7,16 unequal: No Solution +Moving to Node #0,0,0 +Current State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] +Exploring Operation: 18+11=29, Resulting Numbers: [29] +29,16 unequal: No Solution +Moving to Node #0,0 +Current State: 16:[11, 22, 18], Operations: ['96-78=18'] +Exploring Operation: 18-11=7, Resulting Numbers: [22, 7] +Generated Node #0,0,1: 16:[22, 7] Operation: 18-11=7 +Moving to Node #0,0,1 +Current State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] +Exploring Operation: 22-7=15, Resulting Numbers: [15] +15,16 unequal: No Solution +Moving to Node #0,0,1 +Current State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] +Exploring Operation: 22+7=29, Resulting Numbers: [29] +29,16 unequal: No Solution +Moving to Node #0,0 +Current State: 16:[11, 22, 18], Operations: ['96-78=18'] +Exploring Operation: 22/11=2, Resulting Numbers: [18, 2] +Generated Node #0,0,2: 16:[18, 2] Operation: 22/11=2 +Moving to Node #0,0,2 +Current State: 
16:[18, 2], Operations: ['96-78=18', '22/11=2'] +Exploring Operation: 18-2=16, Resulting Numbers: [16] +16,16 equal: Goal Reached +``` + +# Direct Solution Trace + +```txt +Current State: 16:[96, 11, 78, 22], Operations: [ ] +Exploring Operation: 96-78=18, Resulting Numbers: [11, 22, 18] +Generated Node #2: [11, 22, 18] from Operation: 96-78=18 +Current State: 16:[11, 22, 18], Operations: ['96-78=18'] +Exploring Operation: 22/11=2, Resulting Numbers: [18, 2] +Generated Node #3: [18, 2] from Operation: 22/11=2 +Current State: 16:[18, 2], Operations: ['96-78=18', '22/11=2'] +Exploring Operation: 18-2=16, Resulting Numbers: [16] +16,16 equal: Goal Reached +``` + +Figure 14: Example of training data for CountDown. Top: game setup. Middle: full DFS search trace for training backtracking model. Bottom: correct solution path for training direct solution model. + +Initial Board + +
7245
17
839
51
297
32
84
4789
6213
+ +Solved Board + +
712498635
964135278
583726194
876954312
239861457
451372986
328549761
147683529
695217843
+ +# Example of a Sudoku game + +START $(0, 0) = 7$ $(0, 2) = 2$ $(0, 3) = 4$ $(0, 8) = 5$ $(1, 3) = 1$ $(1, 7) = 7$ $(2, 1) = 8$ $(2, 2) = 3$ $(2, 7) = 9$ $(3, 4) = 5$ $(3, 7) = 1$ $(4, 0) = 2$ $(4, 2) = 9$ $(4, 8) = 7$ $(5, 3) = 3$ $(5, 5) = 2$ $(6, 2) = 8$ $(6, 4) = 4$ $(7, 1) = 4$ $(7, 2) = 7$ $(7, 4) = 8$ $(7, 8) = 9$ $(8, 0) = 6$ $(8, 3) = 2$ $(8, 4) = 1$ $(8, 8) = 3$ solving + +# DFS Search Trace + +SOL_START \((4, 4) = 6\) \((8, 2) = 5\) \((4, 3) = 8\) \((8, 1) = 9\) \((8, 5) = 7\) GUESS: \((0, 1)\) \([1, 6] = 1\) \((0, 1) = 1\) GUESS: \((0, 4)\) \([3, 9] = 3\) \((0, 4) = 3\) GUESS: \((0, 6)\) \([6, 8] = 6\) \((0, 6) = 6\) \((0, 7) = 8\) \((8, 7) = 4\) \((0, 5) = 9\) \((1, 4) = 2\) \((1, 8) = 4\) \((2, 4) = 7\) \((3, 5) = 1\) \((4, 5) = 9\) \((8, 6) = 8\) \((1, 2) = 6\) \((1, 6) = 3\) \((3, 3) = 7\) \((1, 1) = 5\) \((1, 5) = 8\) \((2, 0) = 4\) \((4, 1) = 3\) \((4, 7) = 5\) \((5, 6) = 4\) \((5, 7) = 6\) \((5, 8) = 8\) \((6, 1) = 2\) \((7, 7) = 2\) \((1, 0) = 9\) \((3, 0) = 8\) \((3, 1) = 6\) \((3, 8) = 2\) \((5, 1) = 7\) \((5, 2) = 1\) \((2, 8) = 1\) \((3, 6) = 9\) \((5, 0) = 5\) \((6, 8) = 6\) \((2, 6) = 2\) NO_CANDIDATE: \((3, 2)\) NO CANDIDATE: \((0, 6)\) REVERT: \((0, 6)\) \([6, 8] =\) NONE GUESS: \((0, 6)\) \([6, 8] = 8\) \((0, 6) = 8\) \((0, 7) = 6\) \((8, 6) = 4\) \((8, 7) = 8\) \((0, 5) = 9\) \((1, 4) = 2\) \((1, 6) = 3\) \((1, 8) = 4\) \((2, 4) = 7\) \((3, 5) =\) \(4\) \((4, 5) = 1\) \((4, 6) = 5\) \((5, 4) = 9\) \((5, 6) = 6\) \((5, 7) = 4\) \((5, 8) = 8\) \((1, 2) = 6\) \((3, 3) = 7\) \((3, 8) = 2\) \((4,\) \(1) = 3\) \((5,2) = 1\) \((6,1) = 2\) \((6,7) = -2\) \((1,1) = -5\) \((1,5) = -8\) \((2,0) = -4\) \((2,8) = -1\) \((3,0)=\( \(8(3,1) = -6(3,6) = -7(7,0)=3(6,0)=1\text{NO\_CANDIDATE}:(3,2)\text{NO\_CANDIDATE}:(0,6)\text{revert}:(0,6)[6,(8)]=\)\(\text{NO\_CANDIDATE NO\_CANDIDATE}:(0,4)\text{REVERT}:(0,4)[3,9]=\text{NONE 
GUESS}:(0,4)[3,9]=9(0,4)=9(5,4)=7(2,4)=2(3,3)=9(3,5)=4(4,5)=1(1,4)=3(3,2)=6(5,1)=5(1,1)=6(1,2)=4(2,\)\)\(\begin{array}{r}\emptyset=5(2,5)=6(4,1)=3(5,2)=1(6,1)=2(0,5)=8(1,\emptyset)=9(1,5)=5(2,3)=7(3,\emptyset)=8\\(3,\text{1})=7(3,\text{8})=2(5,\emptyset)=4(7,\text{5})=3(1,\text{8})=8(3,\text{6})=3(5,\text{8})=6(6,\text{5})=9(6,\text{8})=1(7,\emptyset)=\\1(0,\emptyset)=6(0,\emptyset)=3(1,\emptyset)=2(2,\emptyset)=4(5,\emptyset)=8(6,\emptyset)=3(7,\emptyset)=5(8,\emptyset)=4(2,\emptyset)=1(4,\emptyset)\end{array}\) \(= (3,1) = -7(3,\text{8}) = -6(4,\text{7}) = -3(1,\text{6}) = -2(2,\text{8}) = -4(5,\text{7}) = -8(6,\text{0}) = -3(7,\text{7}) = -2(8,\text{6}) = -8(6,\text{3}) = -5\mathrm{SOL}\quad\mathrm{END}\) + +# Correct Solution + +$\begin{array}{l}\mathrm{SOL\_START(4,4) = 6(8,2) = 5(4,3) = 8(8,1) = 9(8,5) = 7(0,1) = 1(0,4) = 9(5,4) = 7(2,4) =}\\ \mathrm{2(3,3) = 9(3,5) = 4(4,5) = 1(1,4) = 3(3,2) = 6(5,1) = 5(1,1) = 6(1,2) = 4(2,0) = 5(2,5)}\\ \mathrm{= 6(4,1) = 3(5,2) = 1(6,1) = 2(0,5) = 8(1,0) = 9(1,5) = 5(2,3) = 7(3,0) = 8(3,1) = 7(3,}\\ \mathrm{8) = 2(5,0) = 4(7,5) = 3(1,8) = 8(3,6) = 3(5,8) = 6(6,5) = 9(6,8) = 1(7,0) = 1(0,6) = 6}\\ (\emptyset ,7) = 3(1,6) = 2(2,8) = 4(5,7) = 8(6,\emptyset) = 3(7,6) = 5(8,7) = 4(2,6) = 1(4,6) = 4(4,7) = \\ \mathrm{5(5,6)} = 9(6,6) = 7(6,7) = 6(7,3) = 6(7,7) = 2(8,6) = 8(6,3) = 5\mathrm{SOL\_END} \end{array}$ + +Figure 15: Example of training data for Sudoku game. Top: initial puzzle setup. Middle: full search trace with guesses and backtracking (tabs used). Bottom: final correct solution. 
+ +# Example of a stacked-CountDown game + +Target: 96 Candidates: [22, 77, 24, 48, 31, 12, 36, 35] + +# DFS Search Trace + +```yaml +Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: $77 - 48 = 29$ ,Resulting Numbers: [22, 24, 29, 31, 12, 36, 35] Generated Node #0,0:96:[22, 24, 29, 31, 12, 36, 35]Operation: $77 - 48 = 29$ Moving to Node $\# 0,0$ Current State:96:[22, 24, 29, 31, 12, 36, 35],Operations:['77-48=29'] Exploring Operation: $22 + 24 = 46$ ,Resulting Numbers: [29, 46, 31, 12, 36, 35] Generated Node $\# 0,0,0$ :96:[29,46,31,12,36,35]Operation: $22 + 24 = 46$ Moving to Node $\# 0,0,0$ Current State:96:[29,46,31,12,36,35],Operations:'77-48=29', '22+24=46' ] Exploring Operation: $46 - 29 = 17$ ,Resulting Numbers: [17,31,12,36,35] 17,31 unequal Moving to Node $\# 0,0$ ... . . . Current State:96:[29,2,31,12,36,35],Operations:'77-48=29', '24-22=2' ] Exploring Operation: $29 + 2 = 31$ ,Resulting Numbers: [31,31,12,36,35] 31,31 equal Current State:96:[31,12,36,35],Operations:[] Exploring Operation: $36 - 35 = 1$ ,Resulting Numbers: [31,12,1] Generated Node $\# 0,0$ :96:[31,12,1]Operation: $36 - 35 = 1$ Moving to Node $\# 0,0$ Current State:96:[31,12,1],Operations:'36-35=1' Exploring Operation: $31 + 1 = 32$ ,Resulting Numbers: [12,32] Generated Node $\# 0,0,0$ :96:[12,32]Operation: $31 + 1 = 32$ Moving to Node $\# 0,0,0$ Current State:96:[12,32],Operations:' $36 - 35 = 1$ ,' $31 + 1 = 32$ ' Exploring Operation: $12 + 32 = 44$ ,Resulting Numbers: [44] 44.96 unequal:No Solution ... 
Exploring Operation: $4*24 = 96$ ,Resulting Numbers:[96] +``` + +# Direct Solution Trace + +```txt +Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: 77-22=55, Resulting Numbers: [55, 24, 48, 31, 12, 36, 35] Generated Node #2: [55, 24, 48, 31, 12, 36, 35] from Operation: 77-22=55 Current State: 96:[55, 24, 48, 31, 12, 36, 35], Operations: ['77-22=55'] Exploring Operation: 48-24=24, Resulting Numbers: [55, 24, 31, 12, 36, 35] Generated Node #3: [55, 24, 31, 12, 36, 35] from Operation: 48-24=24 Current State: 96:[55, 24, 31, 12, 36, 35], Operations: ['77-22=55', '48-24=24'] Exploring Operation: 55-24=31, Resulting Numbers: [31, 31, 12, 36, 35] 31,31 equal Current State: 96:[31, 12, 36, 35], Operations: [] Exploring Operation: 35-31=4, Resulting Numbers: [4, 12, 36] Generated Node #2: [4, 12, 36] from Operation: 35-31=4 Current State: 96:[4, 12, 36], Operations: ['35-31=4'] Exploring Operation: 36-12=24, Resulting Numbers: [24, 4] Generated Node #3: [24, 4] from Operation: 36-12=24 Current State: 96:[24, 4], Operations: ['35-31=4', '36-12=24'] Exploring Operation: $4 \times 24 = 96$ ,Resulting Numbers:[96] 96,96 equal: Goal Reached +``` + +Figure 16: Example of training data for stacked-CountDown (Appendix F). Top: game setup. Middle: full DFS search trace for training backtracking model. Bottom: correct solution path for training direct solution model. 
\ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07052/images/02b12a0d63aecd5659c4e656b5c8285ea2c85ddb288feb1efb1f169ec1e01426.jpg b/data/2025/2504_07xxx/2504.07052/images/02b12a0d63aecd5659c4e656b5c8285ea2c85ddb288feb1efb1f169ec1e01426.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0da66f78826f128e0911c0cb3016262030b02f9e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/02b12a0d63aecd5659c4e656b5c8285ea2c85ddb288feb1efb1f169ec1e01426.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92928ab0e4dffa8c805414a0cf960755bee6ce6b63c06f32cb553f55c3221439 +size 20148 diff --git a/data/2025/2504_07xxx/2504.07052/images/16a0114708d8b92c3f2baea7ffa0d6e48f30371b94caec65de5a259c8b2f430a.jpg b/data/2025/2504_07xxx/2504.07052/images/16a0114708d8b92c3f2baea7ffa0d6e48f30371b94caec65de5a259c8b2f430a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aa17f9e992b733d298d5b615a12cc484ea0a7fed --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/16a0114708d8b92c3f2baea7ffa0d6e48f30371b94caec65de5a259c8b2f430a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b828d88659d32bda83d7528ac5d63a3fa4eeaaf9c47bf97b36d00021e52b54fe +size 25489 diff --git a/data/2025/2504_07xxx/2504.07052/images/456bf56f639912bcebe5fe77df3d7949409e1d805af59d1b5cd9f8ffe0142eef.jpg b/data/2025/2504_07xxx/2504.07052/images/456bf56f639912bcebe5fe77df3d7949409e1d805af59d1b5cd9f8ffe0142eef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..636afe1014433f7f5ed6fae5bd6a5352e855f167 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/456bf56f639912bcebe5fe77df3d7949409e1d805af59d1b5cd9f8ffe0142eef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c431b4929e4a259fa98a270aafbec047fb1d692c72da0cc0800e7e52dbc03ec1 +size 3727 diff --git 
a/data/2025/2504_07xxx/2504.07052/images/45c3d566b5fe3c88b3258233a8b99a35829544e5efacedbb18a9463cabd9d636.jpg b/data/2025/2504_07xxx/2504.07052/images/45c3d566b5fe3c88b3258233a8b99a35829544e5efacedbb18a9463cabd9d636.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4a1b49a12b2b98e176ddd34b62b35210eb3468d4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/45c3d566b5fe3c88b3258233a8b99a35829544e5efacedbb18a9463cabd9d636.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fccc842038c3994a8f165ac850ce0336d8e97b0fbd434080773c6599201b3544 +size 6392 diff --git a/data/2025/2504_07xxx/2504.07052/images/4845c8f0e654443d5b277561c128b32ab3ce49a0eb29172ae399b108b2328f01.jpg b/data/2025/2504_07xxx/2504.07052/images/4845c8f0e654443d5b277561c128b32ab3ce49a0eb29172ae399b108b2328f01.jpg new file mode 100644 index 0000000000000000000000000000000000000000..263fe8c0cfa24287efd05974dbf2e26895af1e72 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/4845c8f0e654443d5b277561c128b32ab3ce49a0eb29172ae399b108b2328f01.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b515d77662c0d977c777fcc0ae1733d74d4bd216617ea1ecc085b998e91ecc6 +size 30795 diff --git a/data/2025/2504_07xxx/2504.07052/images/574ecd7a6e5556ba5ba626aeade30078f52b3ad8e6a2dee475b386af00289639.jpg b/data/2025/2504_07xxx/2504.07052/images/574ecd7a6e5556ba5ba626aeade30078f52b3ad8e6a2dee475b386af00289639.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0afd443b2ae509c71218c03af6ac3da66064aacd --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/574ecd7a6e5556ba5ba626aeade30078f52b3ad8e6a2dee475b386af00289639.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63e134fe06389ea248cf71422bc24bc11b910e17aa18944492fc3fa9778687b3 +size 6018 diff --git a/data/2025/2504_07xxx/2504.07052/images/58fd3a6281a1bb9dfa15bece1a73c9939603a56ca0f044b45ec84822d61fe0f3.jpg 
b/data/2025/2504_07xxx/2504.07052/images/58fd3a6281a1bb9dfa15bece1a73c9939603a56ca0f044b45ec84822d61fe0f3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7b61bc0e4d6c61021347d1e10cdd4325ee9f4f5e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/58fd3a6281a1bb9dfa15bece1a73c9939603a56ca0f044b45ec84822d61fe0f3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44df99c169c6ea7c32cb19f66edca682d1c7870c37f3cfdf2118633b521872b5 +size 23421 diff --git a/data/2025/2504_07xxx/2504.07052/images/5e892c3a71230aa82910617527ddd4696a13bfb4936334f3110ec7c974dc70b3.jpg b/data/2025/2504_07xxx/2504.07052/images/5e892c3a71230aa82910617527ddd4696a13bfb4936334f3110ec7c974dc70b3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..36e72d68080909f53b0d666153f6a843965be0f4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/5e892c3a71230aa82910617527ddd4696a13bfb4936334f3110ec7c974dc70b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:101adb10d9f5c06a271402c9a2ec3d17b3052104f46ca62c67534be223e5a150 +size 6420 diff --git a/data/2025/2504_07xxx/2504.07052/images/64eea15ad12a2ff0fc19f7e2b733fda8065aa921d4af04dc08e790c09f82a37d.jpg b/data/2025/2504_07xxx/2504.07052/images/64eea15ad12a2ff0fc19f7e2b733fda8065aa921d4af04dc08e790c09f82a37d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..552d108d247db22b971a37a7c4b51aeab30a3501 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/64eea15ad12a2ff0fc19f7e2b733fda8065aa921d4af04dc08e790c09f82a37d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dd0a3be4f2baefc213d73fb5828c3480da6277e8de7fdf259e61001aebd81e89 +size 28586 diff --git a/data/2025/2504_07xxx/2504.07052/images/69ac0e6e576a49a10af498ffd099c72ba54c2b238e83d3d324692aeb4a963b1e.jpg b/data/2025/2504_07xxx/2504.07052/images/69ac0e6e576a49a10af498ffd099c72ba54c2b238e83d3d324692aeb4a963b1e.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..de83c6692e9be3ffbb721bfc6fdfb0739b686f8e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/69ac0e6e576a49a10af498ffd099c72ba54c2b238e83d3d324692aeb4a963b1e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:cb7b5ffd5e65bdd87fb5e775fc12927a143f63a472b490db10de5fcc6e8f5df4 +size 57775 diff --git a/data/2025/2504_07xxx/2504.07052/images/6c35e4c35a8fbc4b9bb884248ec42755727f5cb1a53b8128d768384367b79683.jpg b/data/2025/2504_07xxx/2504.07052/images/6c35e4c35a8fbc4b9bb884248ec42755727f5cb1a53b8128d768384367b79683.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1474ec3d6b079a06ad48fb2b85f7a3c2b10ca51d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/6c35e4c35a8fbc4b9bb884248ec42755727f5cb1a53b8128d768384367b79683.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:39d329743559aca6b89c67f148df3272da02a426427a342d060872da80ca08b5 +size 13581 diff --git a/data/2025/2504_07xxx/2504.07052/images/6d48ef2be03c9c8fe24eb88ceffb6ab73372724258852629afc0ff253920d49b.jpg b/data/2025/2504_07xxx/2504.07052/images/6d48ef2be03c9c8fe24eb88ceffb6ab73372724258852629afc0ff253920d49b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cfa3152e5dbc035b419d20064624a93f51689b64 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/6d48ef2be03c9c8fe24eb88ceffb6ab73372724258852629afc0ff253920d49b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3fe14cc2fe7f2f6ead7723308386e7e7005a019517f4b54177c02fca5cc29ce +size 14090 diff --git a/data/2025/2504_07xxx/2504.07052/images/6d58a67cb14836a8528234812303ed4b872557173c0aaf09e6fd2050fe82eb1f.jpg b/data/2025/2504_07xxx/2504.07052/images/6d58a67cb14836a8528234812303ed4b872557173c0aaf09e6fd2050fe82eb1f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff48b7e8ddc4f706ead8dabcbb300c7cb850ac9d --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07052/images/6d58a67cb14836a8528234812303ed4b872557173c0aaf09e6fd2050fe82eb1f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:42dacaa0f53ca723f5e398be03d0acd46324f9ca2bb1e02d79708cefa271471b +size 28315 diff --git a/data/2025/2504_07xxx/2504.07052/images/70e27d83371e2162ae2ad3312e8604d72f509e110ba9bf9f581d4ee15f31c6b6.jpg b/data/2025/2504_07xxx/2504.07052/images/70e27d83371e2162ae2ad3312e8604d72f509e110ba9bf9f581d4ee15f31c6b6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..733947c54658eb89caab2d7f19bc551fcdc8207f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/70e27d83371e2162ae2ad3312e8604d72f509e110ba9bf9f581d4ee15f31c6b6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0d984aeacfd5934ea8752b7cca045177e96235ac1f0c2bd3cc5a4fbbc2975e7f +size 13688 diff --git a/data/2025/2504_07xxx/2504.07052/images/74d0920a8d16477d6e33f418d0398f280da866ebfc2cffcf91324a04375b76a2.jpg b/data/2025/2504_07xxx/2504.07052/images/74d0920a8d16477d6e33f418d0398f280da866ebfc2cffcf91324a04375b76a2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f67ea1b3652af3958428b69d17ff82d9649872f5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/74d0920a8d16477d6e33f418d0398f280da866ebfc2cffcf91324a04375b76a2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31f1e67ef529c5741363bb10a80c0312625b1360596ce26f2388a4491cf693a5 +size 27856 diff --git a/data/2025/2504_07xxx/2504.07052/images/7e900a1a3702d874176ede358ecb138cc0af4e0c40a03c1f9b5a345fc93aee4a.jpg b/data/2025/2504_07xxx/2504.07052/images/7e900a1a3702d874176ede358ecb138cc0af4e0c40a03c1f9b5a345fc93aee4a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e6495158ca8478b58a92ffa058f5fc33fb75a18c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/7e900a1a3702d874176ede358ecb138cc0af4e0c40a03c1f9b5a345fc93aee4a.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:f65f2c7b72799594726ea8354276a2fa8e33db68877f679649951ce2e438313e +size 22583 diff --git a/data/2025/2504_07xxx/2504.07052/images/80d09d2845a5fe640d41cafb42f29595802eafe4a00ac34ac72dcec1f68b6798.jpg b/data/2025/2504_07xxx/2504.07052/images/80d09d2845a5fe640d41cafb42f29595802eafe4a00ac34ac72dcec1f68b6798.jpg new file mode 100644 index 0000000000000000000000000000000000000000..14bfe86a379cc5bc8f92d7d0e9ae46b91345f0a1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/80d09d2845a5fe640d41cafb42f29595802eafe4a00ac34ac72dcec1f68b6798.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:582f152a06ab0c341952a1113ad8ab8940476af2b62979e8c87cdc4f661335a5 +size 5199 diff --git a/data/2025/2504_07xxx/2504.07052/images/812d28a296faed703ecac1e5215249a835aa2ce1668ed109dca4556d34e7a424.jpg b/data/2025/2504_07xxx/2504.07052/images/812d28a296faed703ecac1e5215249a835aa2ce1668ed109dca4556d34e7a424.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41f3fdc88272b276c845f4cf45e4058a22b21ab2 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/812d28a296faed703ecac1e5215249a835aa2ce1668ed109dca4556d34e7a424.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b28f0d45e58993b366bd693151bd604ec7ad0f48f15cd8d53fea78aff0d1f11d +size 20972 diff --git a/data/2025/2504_07xxx/2504.07052/images/8130329fe5d7d8a2269feb52adbcbdbdac3a6312e097ab867dacdcd5c5d9099c.jpg b/data/2025/2504_07xxx/2504.07052/images/8130329fe5d7d8a2269feb52adbcbdbdac3a6312e097ab867dacdcd5c5d9099c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d8b9c839d5463f8d76b371ea4377cf584dce5df --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/8130329fe5d7d8a2269feb52adbcbdbdac3a6312e097ab867dacdcd5c5d9099c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5685e4c81128b971a3c6866b2f4e47e6f17e40f18164d9a08570c3ba3043e29b +size 3383 diff --git 
a/data/2025/2504_07xxx/2504.07052/images/8eb41fd414170e86a8c76ead06fd22df26d60ebd1999e2cf7e9f99d3edb8492a.jpg b/data/2025/2504_07xxx/2504.07052/images/8eb41fd414170e86a8c76ead06fd22df26d60ebd1999e2cf7e9f99d3edb8492a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89a07d57312f7dbd0791ac15d7efaec11f7502f4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/8eb41fd414170e86a8c76ead06fd22df26d60ebd1999e2cf7e9f99d3edb8492a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c650fae86298f9cd4fe9a8e30a8c67007c9adc8a8cdf6862903eab0011d510e4 +size 14544 diff --git a/data/2025/2504_07xxx/2504.07052/images/90573dfe71e77f9da3d2327c2e68b2211bc164de4f3632147e1b10f7612d2631.jpg b/data/2025/2504_07xxx/2504.07052/images/90573dfe71e77f9da3d2327c2e68b2211bc164de4f3632147e1b10f7612d2631.jpg new file mode 100644 index 0000000000000000000000000000000000000000..06c12f2a63917cf4388d5171b9c64ab7f32d416f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/90573dfe71e77f9da3d2327c2e68b2211bc164de4f3632147e1b10f7612d2631.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:544a27a52f1c5990ca15c4318bb08c9d82ff6ae94a3ffeb2ee645f7b2554c5a8 +size 22868 diff --git a/data/2025/2504_07xxx/2504.07052/images/926acdc15b5f62a9030e2b341cc566625d663dc62c206a1cfe0ddd4f77598eea.jpg b/data/2025/2504_07xxx/2504.07052/images/926acdc15b5f62a9030e2b341cc566625d663dc62c206a1cfe0ddd4f77598eea.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fd7928772fbd71d2d4155a542906cca6588f6a64 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/926acdc15b5f62a9030e2b341cc566625d663dc62c206a1cfe0ddd4f77598eea.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1ad4fffa459bc205c004a89d546e12dee568b592de77d9af6615d7dad12e1a9 +size 19472 diff --git a/data/2025/2504_07xxx/2504.07052/images/932547a1aae0442be0b8ed55b64d894efab339f8a1e422c141b2a42aeb51fdfc.jpg 
b/data/2025/2504_07xxx/2504.07052/images/932547a1aae0442be0b8ed55b64d894efab339f8a1e422c141b2a42aeb51fdfc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5a026ffc6f4ca82e884f90e3c35b3f7d2415945d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/932547a1aae0442be0b8ed55b64d894efab339f8a1e422c141b2a42aeb51fdfc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c478d8f85692c304086e66ccb963d775d89c16bfe1f502d23e5cc109e188831b +size 21043 diff --git a/data/2025/2504_07xxx/2504.07052/images/93f7d7867694af716476750ed208a1f45b43f84d6a6f61f3e5d59f57fa4913ff.jpg b/data/2025/2504_07xxx/2504.07052/images/93f7d7867694af716476750ed208a1f45b43f84d6a6f61f3e5d59f57fa4913ff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4c713f6647e220ffbd44c9242a49b48cc3532b9f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/93f7d7867694af716476750ed208a1f45b43f84d6a6f61f3e5d59f57fa4913ff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fac6d7d31e121ead90c62de9f0e21d4fee599674332c45e3d939ca921a05c77 +size 23209 diff --git a/data/2025/2504_07xxx/2504.07052/images/9b1b98b03adf16268a74f8dec7aaeb6508fe5869505875ad5daa40391ef2fe85.jpg b/data/2025/2504_07xxx/2504.07052/images/9b1b98b03adf16268a74f8dec7aaeb6508fe5869505875ad5daa40391ef2fe85.jpg new file mode 100644 index 0000000000000000000000000000000000000000..134948848f9fc3486f20bfe9bf6ef73db5ce0aaa --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/9b1b98b03adf16268a74f8dec7aaeb6508fe5869505875ad5daa40391ef2fe85.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24d293beb0679889b882f51e47d9f1f7fe1293a7b074c90ead66017058c989bf +size 108014 diff --git a/data/2025/2504_07xxx/2504.07052/images/a5baf5c34e74588f14d8ecbd99a21e83bd733de70d8b276db19b5acc6c40b6b3.jpg b/data/2025/2504_07xxx/2504.07052/images/a5baf5c34e74588f14d8ecbd99a21e83bd733de70d8b276db19b5acc6c40b6b3.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..2cd2c781706c898190247e125abb9472243c429c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/a5baf5c34e74588f14d8ecbd99a21e83bd733de70d8b276db19b5acc6c40b6b3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:64c9162f18bf4f71af740280ede19ed83531ac5d9341bb956ed78ccd6c5ef4da +size 12157 diff --git a/data/2025/2504_07xxx/2504.07052/images/a7f0cb936326cfeac7c67d4002567f49d6d2af471738d349c7d0ed30b32045bb.jpg b/data/2025/2504_07xxx/2504.07052/images/a7f0cb936326cfeac7c67d4002567f49d6d2af471738d349c7d0ed30b32045bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bcc5db53ae76ed78b47a5c1267ad8debcfda074d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/a7f0cb936326cfeac7c67d4002567f49d6d2af471738d349c7d0ed30b32045bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7380d87b790b46e2d4fd42893195c098b0b3b0abac6b42b6d93524f23a87137 +size 12618 diff --git a/data/2025/2504_07xxx/2504.07052/images/ae64856c00c49718b08575467901ac63bde0a1130a794f68a3fc2292876c436c.jpg b/data/2025/2504_07xxx/2504.07052/images/ae64856c00c49718b08575467901ac63bde0a1130a794f68a3fc2292876c436c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0c2843e867b4b5e64bffd208d8710fe83b643cfb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/ae64856c00c49718b08575467901ac63bde0a1130a794f68a3fc2292876c436c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9cf9d36196236e31202e7e69bd3d129fb49ef0f6bbd31600c6961082406ebcc +size 12908 diff --git a/data/2025/2504_07xxx/2504.07052/images/b78f6358bb8f2f1fa9c15f4102fea440d9c5a986e0710809e9168c38b2874448.jpg b/data/2025/2504_07xxx/2504.07052/images/b78f6358bb8f2f1fa9c15f4102fea440d9c5a986e0710809e9168c38b2874448.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64b2e01a527dc35da894dfd319ebe524166d4f44 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07052/images/b78f6358bb8f2f1fa9c15f4102fea440d9c5a986e0710809e9168c38b2874448.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1e74eff2b587f38c6cf6eba9a32b3135a8b8cb22d2504511af45e15c0a7fb32e +size 30345 diff --git a/data/2025/2504_07xxx/2504.07052/images/c0806c8c2a5e96f1f7a0026e9c74be9031e96165b9ef739ed5759bc84f66787e.jpg b/data/2025/2504_07xxx/2504.07052/images/c0806c8c2a5e96f1f7a0026e9c74be9031e96165b9ef739ed5759bc84f66787e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c5b3271827c5e2cb3155d707c78101457a3b986 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/c0806c8c2a5e96f1f7a0026e9c74be9031e96165b9ef739ed5759bc84f66787e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:465cec5c8cd40677d27148c63863182f4939590edc83486cfa7cdeed939733e1 +size 20352 diff --git a/data/2025/2504_07xxx/2504.07052/images/c3231bcd3f7a4377e0bb50797ec0d78ce075ede1cd66eeda2dce0e11b6a62c45.jpg b/data/2025/2504_07xxx/2504.07052/images/c3231bcd3f7a4377e0bb50797ec0d78ce075ede1cd66eeda2dce0e11b6a62c45.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b866d4eb8f81ce53cf55d0c2e3c22716e3bde987 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/c3231bcd3f7a4377e0bb50797ec0d78ce075ede1cd66eeda2dce0e11b6a62c45.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:125cef0aafce8acdda5e7cbdf0b95b614b7448c8617e08d5fa117dd3ac0c3259 +size 21218 diff --git a/data/2025/2504_07xxx/2504.07052/images/e36fe3888388f8faacc648f6647e91131e8f15d326a7c7ed9d7a8e014287a021.jpg b/data/2025/2504_07xxx/2504.07052/images/e36fe3888388f8faacc648f6647e91131e8f15d326a7c7ed9d7a8e014287a021.jpg new file mode 100644 index 0000000000000000000000000000000000000000..be144909dde310f5bbf88523cbcf6ee2f92231bc --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/e36fe3888388f8faacc648f6647e91131e8f15d326a7c7ed9d7a8e014287a021.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:e89eeb6bb12092b1d1361745ab5b49cbda27683f29022d14b5694fdfb98c8aa3 +size 21061 diff --git a/data/2025/2504_07xxx/2504.07052/images/e70f23b00318eb73998084c7cc03a262857c0b2c31e8da7e040b4feba0f83084.jpg b/data/2025/2504_07xxx/2504.07052/images/e70f23b00318eb73998084c7cc03a262857c0b2c31e8da7e040b4feba0f83084.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ba46143e69d008176c403f6ac56cfdb6acc9919 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/e70f23b00318eb73998084c7cc03a262857c0b2c31e8da7e040b4feba0f83084.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:080427ece3e9e5470e74afc885119f2361e5a3d97d7d97f9b0b09d91e295c56b +size 15839 diff --git a/data/2025/2504_07xxx/2504.07052/images/f6b5c72489b87d26c48b44d4f55f9ca147976d6fd1d61acc75e1b1ed0f2b685f.jpg b/data/2025/2504_07xxx/2504.07052/images/f6b5c72489b87d26c48b44d4f55f9ca147976d6fd1d61acc75e1b1ed0f2b685f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5778ca6d2a357a7aff477b725c9971e183ef497f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/f6b5c72489b87d26c48b44d4f55f9ca147976d6fd1d61acc75e1b1ed0f2b685f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1a03d4dca18bc647e737cebe85b08653738bf549f6f76a8ac4abb87b4c01209 +size 20217 diff --git a/data/2025/2504_07xxx/2504.07052/images/f744918c025c250a03c698c53abc9f6b439ada25c09467401c31889e902dd09a.jpg b/data/2025/2504_07xxx/2504.07052/images/f744918c025c250a03c698c53abc9f6b439ada25c09467401c31889e902dd09a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2876ff6d034d5d5951b387d4aec5af454163d7c8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/f744918c025c250a03c698c53abc9f6b439ada25c09467401c31889e902dd09a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:74726d890a3a84a02e87cf3f2b7b3d89f5bd3b56b762ca3be7d5f001267e3cdd +size 37237 diff --git 
a/data/2025/2504_07xxx/2504.07052/images/f9c05a7eabbf3900cda3687f00654531d5b068bf60ac7e40b2f0d44c39add68a.jpg b/data/2025/2504_07xxx/2504.07052/images/f9c05a7eabbf3900cda3687f00654531d5b068bf60ac7e40b2f0d44c39add68a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dde2b7a75900af300b86522bccb4e5414234065c --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/f9c05a7eabbf3900cda3687f00654531d5b068bf60ac7e40b2f0d44c39add68a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b98460de0b0630fd7d289d4a4d3fc7b96e3cc98d8849ec99148829b50721fa49 +size 16090 diff --git a/data/2025/2504_07xxx/2504.07052/images/fc56edd9b4072e1423ef98a3d12b83f40442c91276c3388b7c61b2accbcc2e7f.jpg b/data/2025/2504_07xxx/2504.07052/images/fc56edd9b4072e1423ef98a3d12b83f40442c91276c3388b7c61b2accbcc2e7f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a7c5258c93ccf9438b7683bd24a6e80104bf7b0a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/images/fc56edd9b4072e1423ef98a3d12b83f40442c91276c3388b7c61b2accbcc2e7f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2ce89dbc9eff8a906f728a97820286a145e26cb39e40c2c1fbf2805c5d85e9fe +size 40476 diff --git a/data/2025/2504_07xxx/2504.07052/layout.json b/data/2025/2504_07xxx/2504.07052/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..1fa35b1672d064819551c9a046cae5e6b13ac424 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07052/layout.json @@ -0,0 +1,14459 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 104, + 79, + 435, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 79, + 435, + 113 + ], + "spans": [ + { + "bbox": [ + 104, + 79, + 435, + 113 + ], + "type": "text", + "content": "To Backtrack or Not to Backtrack: When Sequential Search Limits Model Reasoning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 137, + 160, + 148 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 111, + 137, + 160, + 148 + ], + "spans": [ + { + "bbox": [ + 111, + 137, + 160, + 148 + ], + "type": "text", + "content": "Tian Qin*" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 148, + 201, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 201, + 160 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 201, + 160 + ], + "type": "text", + "content": "Harvard University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 298, + 136, + 401, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 136, + 401, + 148 + ], + "spans": [ + { + "bbox": [ + 298, + 136, + 401, + 148 + ], + "type": "text", + "content": "David Alvarez-Melis" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 298, + 148, + 498, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 298, + 148, + 498, + 160 + ], + "spans": [ + { + "bbox": [ + 298, + 148, + 498, + 160 + ], + "type": "text", + "content": "Harvard University, Kempner Institute, MSR" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 175, + 176, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 175, + 176, + 188 + ], + "spans": [ + { + "bbox": [ + 111, + 175, + 176, + 188 + ], + "type": "text", + "content": "Samy Jelassi†" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 188, + 286, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 188, + 286, + 200 + ], + "spans": [ + { + "bbox": [ + 111, + 188, + 286, + 200 + ], + "type": "text", + "content": "Harvard University, Kempner Institute" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 310, + 175, + 377, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 175, + 377, + 186 + ], + "spans": [ + { + "bbox": [ + 310, + 175, + 377, + 186 + ], + "type": "text", + "content": "Eran Malach‡†" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 310, + 187, + 
484, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 310, + 187, + 484, + 200 + ], + "spans": [ + { + "bbox": [ + 310, + 187, + 484, + 200 + ], + "type": "text", + "content": "Harvard University, Kempner Institute" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 280, + 227, + 330, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 227, + 330, + 239 + ], + "spans": [ + { + "bbox": [ + 280, + 227, + 330, + 239 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 140, + 251, + 471, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 251, + 471, + 506 + ], + "spans": [ + { + "bbox": [ + 140, + 251, + 471, + 506 + ], + "type": "text", + "content": "Recent advancements in large language models (LLMs) have significantly improved their reasoning abilities, particularly through techniques involving search and backtracking. Backtracking naturally scales test-time compute by enabling sequential, linearized exploration via long chain-of-thought (CoT) generation. However, this is not the only strategy for scaling test time-compute: parallel sampling with best-of-" + }, + { + "bbox": [ + 140, + 251, + 471, + 506 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 140, + 251, + 471, + 506 + ], + "type": "text", + "content": " selection provides an alternative that generates diverse solutions simultaneously. Despite the growing adoption of sequential search, its advantages over parallel sampling—especially under a fixed compute budget—remain poorly understood. In this paper, we systematically compare these two approaches on two challenging reasoning tasks: CountDown and Sudoku. Surprisingly, we find that sequential search underperforms parallel sampling on CountDown but outperforms it on Sudoku, suggesting that backtracking is not universally beneficial. 
We identify two factors that can cause backtracking to degrade performance: (1) training on fixed search traces can lock models into suboptimal strategies, and (2) explicit CoT supervision can discourage 'implicit' (non verbalized) reasoning. Extending our analysis to reinforcement learning (RL), we show that models with backtracking capabilities benefit significantly from RL fine-tuning, while models without backtracking see limited, mixed gains. Together, these findings challenge the assumption that backtracking universally enhances LLM reasoning, instead revealing a complex interaction between task structure, training data, model scale, and learning paradigm." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 519, + 196, + 532 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 519, + 196, + 532 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 196, + 532 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 537, + 506, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 506, + 639 + ], + "type": "text", + "content": "Recent studies (Kumar et al., 2024; Havrilla et al., 2024) propose teaching LLMs to correct mistakes through backtracking, enabling exploration of alternative solutions. Despite growing popularity (DeepSeek-AI et al., 2025; Muennighoff et al., 2025), it remains unclear whether correcting errors post-hoc via backtracking is ultimately more compute-efficient at test time than directly learning the correct solution. Solving strategic games such as CountDown and Sudo require extensive exploration of different solution paths, making them ideal for analyzing the computational trade-offs of sequential versus parallel search. 
In this work, we use these two games to conduct a controlled investigation to determine whether backtracking is an effective way to scale test-time compute." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 646, + 506, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 646, + 506, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 646, + 506, + 704 + ], + "type": "text", + "content": "There are two primary strategies to scale LLMs' test-time compute: sequential autoregressive search (explicit backtracking within a chain-of-thought) and parallel sampling (generating multiple independent solutions and selecting the best with best-of- " + }, + { + "bbox": [ + 104, + 646, + 506, + 704 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 646, + 506, + 704 + ], + "type": "text", + "content": "). While sequential search allows the model to refine reasoning by learning from past mistakes, it comes at a cost: due to the attention mechanism, the FLOPs required to generate CoT grow quadratically" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 709, + 398, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 709, + 398, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 709, + 398, + 721 + ], + "type": "text", + "content": "*Correspondence to tqin@g.harvard.edu ‡ Equal senior contributions." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 117, + 721, + 201, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 721, + 201, + 732 + ], + "spans": [ + { + "bbox": [ + 117, + 721, + 201, + 732 + ], + "type": "text", + "content": "†Currently at Apple." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 14, + 225, + 36, + 567 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 225, + 36, + 567 + ], + "spans": [ + { + "bbox": [ + 14, + 225, + 36, + 567 + ], + "type": "text", + "content": "arXiv:2504.07052v2 [cs.LG] 3 Oct 2025" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 82, + 375, + 190 + ], + "blocks": [ + { + "bbox": [ + 109, + 82, + 375, + 190 + ], + "lines": [ + { + "bbox": [ + 109, + 82, + 375, + 190 + ], + "spans": [ + { + "bbox": [ + 109, + 82, + 375, + 190 + ], + "type": "image", + "image_path": "4845c8f0e654443d5b277561c128b32ab3ce49a0eb29172ae399b108b2328f01.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 200, + 506, + 278 + ], + "lines": [ + { + "bbox": [ + 104, + 200, + 506, + 278 + ], + "spans": [ + { + "bbox": [ + 104, + 200, + 506, + 278 + ], + "type": "text", + "content": "Figure 1: Backtracking performance varies significantly with task type and the application of post-training reinforcement learning. 
(A) Training backtracking and direct solution models on CountDown and Selenium reveals task-dependent performance: under equal test-time compute, backtracking (sequential search) underperforms direct solution with best-of-" + }, + { + "bbox": [ + 104, + 200, + 506, + 278 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 200, + 506, + 278 + ], + "type": "text", + "content": " generation (parallel search) on CountDown, but outperforms it on Selenium. (B) Fine-tuning with GRPO consistently improves backtracking model performance across compute budgets, but has mixed effects on the direct solution model." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 376, + 83, + 504, + 190 + ], + "blocks": [ + { + "bbox": [ + 376, + 83, + 504, + 190 + ], + "lines": [ + { + "bbox": [ + 376, + 83, + 504, + 190 + ], + "spans": [ + { + "bbox": [ + 376, + 83, + 504, + 190 + ], + "type": "image", + "image_path": "f9c05a7eabbf3900cda3687f00654531d5b068bf60ac7e40b2f0d44c39add68a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 300, + 506, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 506, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 506, + 380 + ], + "type": "text", + "content": "with sequence length. Even when generating the same number of tokens, sequential search incurs more FLOPs than parallel sampling. To compare these two strategies, we train (i) backtracking models that learn from explicit search traces and use sequential search to solve hard problems, and (ii) direct solution (i.e., no backtracking) models that learn solely from correct solutions, using parallel search at test time. Equating test-time compute, we observe contrasting results (Fig. 
1 A): in CountDown, the backtracking model consistently underperforms, whereas in Sudo, it consistently outperforms the direct solution model." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 388, + 507, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 507, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 507, + 533 + ], + "type": "text", + "content": "Through controlled experiments, we identify two reasons teaching backtracking can inadvertently degrade performance. First, explicit backtracking reasoning traces bias models toward prescribed search strategies, limiting exploration of potentially superior alternatives. In CountDown, the backtracking model closely mimics training search paths, while the direct solution model independently discovers more efficient strategies (Section 4.2). Second, detailed backtracking traces encourage morbidity (producing lengthy yet ineffective reasoning chains), while discouraging internal \"thinking\" (implicit reasoning without outputting CoT, Section 4.3). Beyond these factors, we demonstrate that model size and task-specific characteristics also impact the effectiveness of backtracking (Section 5.1). Crucially, we show that our contrastive observation between Sudoku and Countdown generalizes to real-world tasks: such as math and science problem solving. We show that backtracking is not always the most effective way to scale test-time compute (Appendix A) for general reasoning models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 541, + 506, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 506, + 632 + ], + "type": "text", + "content": "Extending beyond supervised learning, we evaluate reinforcement learning (RL) with Group Relative Policy Optimization (GRPO) (Shao et al., 2024), uncovering novel interactions between backtracking capabilities and RL. 
We show that the backtracking model discovers new, effective search strategies through RL, achieving substantial performance improvements. Conversely, the direct solution model improves one-shot accuracy but loses effectiveness in parallel search, revealing a clear trade-off (Fig. 1 B). This finding shifts our understanding of how backtracking influences a model's potential to improve under RL, highlighting the unique advantage of teaching backtracking for long-term reasoning capabilities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 640, + 504, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 640, + 504, + 664 + ], + "spans": [ + { + "bbox": [ + 104, + 640, + 504, + 664 + ], + "type": "text", + "content": "Our controlled study on two strategic games provides a nuanced understanding of when backtracking effectively scales test-time compute. Our main contributions are:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 674, + 505, + 733 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 107, + 674, + 505, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 674, + 505, + 708 + ], + "spans": [ + { + "bbox": [ + 107, + 674, + 505, + 708 + ], + "type": "text", + "content": "- We use CountDown and Selenium as controlled testbeds to examine whether backtracking enables efficient test-time scaling. Under a fixed compute budget, backtracking outperforms parallel search in Selenium but underperforms in CountDown (Fig. 1 A)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 709, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 709, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 107, + 709, + 505, + 733 + ], + "type": "text", + "content": "- We identify two key factors affecting backtracking efficacy: (1) Prescribed search bias: Training on detailed backtracking traces can unintentionally constrain models to subopti" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 136, + 82, + 299, + 243 + ], + "blocks": [ + { + "bbox": [ + 136, + 82, + 299, + 243 + ], + "lines": [ + { + "bbox": [ + 136, + 82, + 299, + 243 + ], + "spans": [ + { + "bbox": [ + 136, + 82, + 299, + 243 + ], + "type": "image", + "image_path": "16a0114708d8b92c3f2baea7ffa0d6e48f30371b94caec65de5a259c8b2f430a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 316, + 82, + 484, + 223 + ], + "blocks": [ + { + "bbox": [ + 316, + 82, + 484, + 223 + ], + "lines": [ + { + "bbox": [ + 316, + 82, + 484, + 223 + ], + "spans": [ + { + "bbox": [ + 316, + 82, + 484, + 223 + ], + "type": "image", + "image_path": 
"02b12a0d63aecd5659c4e656b5c8285ea2c85ddb288feb1efb1f169ec1e01426.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 136, + 245, + 468, + 289 + ], + "blocks": [ + { + "bbox": [ + 136, + 245, + 468, + 289 + ], + "lines": [ + { + "bbox": [ + 136, + 245, + 468, + 289 + ], + "spans": [ + { + "bbox": [ + 136, + 245, + 468, + 289 + ], + "type": "image", + "image_path": "93f7d7867694af716476750ed208a1f45b43f84d6a6f61f3e5d59f57fa4913ff.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 174, + 323, + 382, + 388 + ], + "blocks": [ + { + "bbox": [ + 161, + 295, + 355, + 322 + ], + "lines": [ + { + "bbox": [ + 161, + 295, + 355, + 322 + ], + "spans": [ + { + "bbox": [ + 161, + 295, + 355, + 322 + ], + "type": "text", + "content": "Direct solution model performs parallel search through temperature sampling:" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 174, + 323, + 382, + 388 + ], + "lines": [ + { + "bbox": [ + 174, + 323, + 382, + 388 + ], + "spans": [ + { + "bbox": [ + 174, + 323, + 382, + 388 + ], + "type": "image", + "image_path": "812d28a296faed703ecac1e5215249a835aa2ce1668ed109dca4556d34e7a424.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "lines": [ + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "type": "text", + "content": "Figure 2: Backtracking and direct solution for two different strategic games. Panel " + }, + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "type": "inline_equation", + "content": "(a, b)" + }, + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "type": "text", + "content": ": Example the search tree for CountDown and Sudoku. Solving both games require extensive search in the solution space. 
Panel " + }, + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "type": "inline_equation", + "content": "(c)" + }, + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "type": "text", + "content": ": The backtracking model is trained on the search traces generated by a Depth-First-Search (DFS) algorithm. At test time, the model performs sequential search. The direct solution model is trained on the correct solution only. At test time, the model performs parallel search through temperature sampling and takes best-of-" + }, + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 401, + 504, + 468 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 487, + 504, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 487, + 504, + 512 + ], + "spans": [ + { + "bbox": [ + 116, + 487, + 504, + 512 + ], + "type": "text", + "content": "mal search strategies. (2) Excessive morbidity: Explicit backtracking traces encourage models to produce lengthy reasoning chains without improving reasoning ability." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 512, + 505, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 512, + 505, + 546 + ], + "spans": [ + { + "bbox": [ + 108, + 512, + 505, + 546 + ], + "type": "text", + "content": "- We demonstrate that reinforcement learning (GRPO) consistently enhances backtracking models by enabling discovery of novel solutions, whereas direct solution models experience mixed outcomes (Fig. 1 B)." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 556, + 201, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 556, + 201, + 569 + ], + "spans": [ + { + "bbox": [ + 105, + 556, + 201, + 569 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 575, + 358, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 575, + 358, + 587 + ], + "spans": [ + { + "bbox": [ + 104, + 575, + 358, + 587 + ], + "type": "text", + "content": "See Appendix B for an extensive review on related work." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 593, + 506, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 593, + 506, + 683 + ], + "spans": [ + { + "bbox": [ + 104, + 593, + 506, + 683 + ], + "type": "text", + "content": "Scaling test-time compute. Prior work has explored scaling language model performance at test time through parallel or sequential search strategies. Parallel methods rely on independent sampling and selection via heuristics or reward models (Brown et al., 2024; Irvine et al., 2023; Levi, 2024; Xin et al., 2024), while sequential methods refine reasoning step by step using earlier outputs (Hou et al., 2025; Lee et al., 2025). Tree-based methods such as MCTS bridge the two and often incorporate process-level reward models to guide reasoning (Wu et al., 2024; Lightman et al., 2023). Our work contributes to this area by comparing sequential (backtracking) and parallel search under fixed compute budgets." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "Self-correction and backtracking. 
Language models can be trained to self-correct through fine-tuning on revision data, synthetic augmentations, or reward-based learning (Saunders et al., 2022; Qu et al., 2024; Welleck et al., 2022). Some approaches also introduce explicit search or separate correction modules to guide revision (Yao et al., 2023b; Havrilla et al.," + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 81, + 506, + 106 + ], + "type": "text", + "content": "2024). We build on this line of work by studying backtracking as an implicit form of self-correction, analyzing when learning to backtrack helps or hinders reasoning." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 116, + 506, + 196 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 116, + 506, + 196 + ], + "spans": [ + { + "bbox": [ + 104, + 116, + 506, + 196 + ], + "type": "text", + "content": "Reinforcement learning for LLM reasoning. Reinforcement learning has shown promise in enabling language models to autonomously discover reasoning strategies, including through simplified algorithms like GRPO (Shao et al., 2024; DeepSeek-AI et al., 2025). 
While prior work has demonstrated strong results, it remains unclear which model properties enable successful RL-based reasoning (Zelikman et al., 2022; Kazemnejad et al., 2024). Our study addresses this gap by comparing how backtracking and no backtracking models respond to RL fine-tuning, revealing asymmetric benefits." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 209, + 381, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 381, + 224 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 381, + 224 + ], + "type": "text", + "content": "3 Two strategic games: CountDown and Sudoku" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 231, + 188, + 242 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 231, + 188, + 242 + ], + "spans": [ + { + "bbox": [ + 105, + 231, + 188, + 242 + ], + "type": "text", + "content": "3.1 CountDown" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 247, + 191, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 191, + 260 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 191, + 260 + ], + "type": "text", + "content": "3.1.1 Game setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 262, + 506, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 506, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 506, + 331 + ], + "type": "text", + "content": "The Game of CountDown has been frequently used as a testbed to study and evaluate LLM reasoning (Gandhi et al., 2024; 2025; Yao et al., 2023a). In a CountDown game, the player is given a set of candidate numbers and a target number (restricted to integers). The goal is to reach the target by applying a sequence of arithmetic operations—addition, subtraction, multiplication, or division—using the candidate numbers. 
Each number must be used exactly once, and intermediate results can be reused in subsequent operations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 340, + 506, + 420 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 420 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 420 + ], + "type": "text", + "content": "To algorithmically solve CountDown, we can represent the problem as a search tree (Fig. 2a). Each node in the search tree corresponds to a state defined by the current set of available numbers. At each step, the algorithm selects a pair of numbers from the set and applies one of the four operations, replacing the pair with the resulting value to create a new state. This process continues recursively until the target number is reached (correct leaf node) or all combinations are exhausted (wrong leaf node). In this work, we play the CountDown with four candidate numbers, and for each game, there are 1,152 possible search paths." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 430, + 211, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 211, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 211, + 442 + ], + "type": "text", + "content": "3.1.2 Data generation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 445, + 507, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 445, + 507, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 445, + 507, + 557 + ], + "type": "text", + "content": "We generate backtracking traces with Depth First Search (DFS) with a sum-heuristic (Gandhi et al. (2024), further details in Appendix C.1). 
We generate a dataset of 500,000 CountDown questions, and the DFS search correctly solves " + }, + { + "bbox": [ + 104, + 445, + 507, + 557 + ], + "type": "inline_equation", + "content": "57\\%" + }, + { + "bbox": [ + 104, + 445, + 507, + 557 + ], + "type": "text", + "content": " of the questions. The backtracking trace is a serialized version of DFS, listing all the tree nodes visited in the order of DFS traversal. To construct the direct solution training data, we prune the backtracking traces to keep only the correct solution path. With the pruning approach, we remove the exploratory parts of the trace while preserving the answer format and scaffolding used in the backtracking model, to ensure a fair comparison. We also ensure that the direct solution model does not see more solved CountDown games, we include only the 285,000 questions (i.e., " + }, + { + "bbox": [ + 104, + 445, + 507, + 557 + ], + "type": "inline_equation", + "content": "500,000 \\times 0.57" + }, + { + "bbox": [ + 104, + 445, + 507, + 557 + ], + "type": "text", + "content": ") that DFS successfully solves. We provide examples of both training data in Appendix H." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 567, + 167, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 567, + 167, + 578 + ], + "spans": [ + { + "bbox": [ + 105, + 567, + 167, + 578 + ], + "type": "text", + "content": "3.2 Suku" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 584, + 191, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 584, + 191, + 597 + ], + "spans": [ + { + "bbox": [ + 105, + 584, + 191, + 597 + ], + "type": "text", + "content": "3.2.1 Game setup" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": "Sudoku is another prototypical strategic game used to study reasoning and search in LLMs (Yao et al., 2023a; Long, 2023). In this work, we focus on hard " + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "inline_equation", + "content": "9 \\times 9" + }, + { + "bbox": [ + 104, + 600, + 506, + 733 + ], + "type": "text", + "content": " Sudo boards, where only about 20 of the 81 cells are pre-filled, making the search space substantially larger (see Appendix C.1 for a description of Sudo rules). To algorithmically solve Sudo, we represent the problem as a search tree (Fig. 2b). Each node corresponds to a partial board state, where some cells have been filled. At each step, the algorithm selects an unfilled cell and fills it with a candidate digit that satisfies Sudo constraints in the current state. Each valid assignment creates a new child node representing the updated board. The process continues recursively until a complete, valid solution is reached (correct leaf node) or no valid moves remain (wrong leaf node). 
The depth of the tree corresponds to the number of empty cells, and the branching factor at each node depends on the number of unfilled cells as well as how many digits are valid for each unfilled cell." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 79, + 236, + 175 + ], + "blocks": [ + { + "bbox": [ + 107, + 79, + 236, + 175 + ], + "lines": [ + { + "bbox": [ + 107, + 79, + 236, + 175 + ], + "spans": [ + { + "bbox": [ + 107, + 79, + 236, + 175 + ], + "type": "image", + "image_path": "6d48ef2be03c9c8fe24eb88ceffb6ab73372724258852629afc0ff253920d49b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 183, + 506, + 283 + ], + "lines": [ + { + "bbox": [ + 104, + 183, + 506, + 283 + ], + "spans": [ + { + "bbox": [ + 104, + 183, + 506, + 283 + ], + "type": "text", + "content": "Figure 3: Backtracking and direct solution models implement different search strategies for CountDown. For test questions that model solves correctly, we measure the number of mistakes made (i.e., wrong terminal nodes visited) before finding the correct solution. We sort the test questions by number of mistakes made by DFS. 
Left: Trained on DFS traces, the number of mistakes made by the backtracking model correlates with the DFS. Middle: In contrast, the direct solution model solves a lot more problems with significantly fewer mistakes compared to DFS. Right: For a given number of mistakes made, we examine whether two models solve the same set of question as DFS. Direct solution model implements a search strategy significantly different from DFS." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 242, + 79, + 370, + 175 + ], + "blocks": [ + { + "bbox": [ + 242, + 79, + 370, + 175 + ], + "lines": [ + { + "bbox": [ + 242, + 79, + 370, + 175 + ], + "spans": [ + { + "bbox": [ + 242, + 79, + 370, + 175 + ], + "type": "image", + "image_path": "e70f23b00318eb73998084c7cc03a262857c0b2c31e8da7e040b4feba0f83084.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 373, + 80, + 503, + 175 + ], + "blocks": [ + { + "bbox": [ + 373, + 80, + 503, + 175 + ], + "lines": [ + { + "bbox": [ + 373, + 80, + 503, + 175 + ], + "spans": [ + { + "bbox": [ + 373, + 80, + 503, + 175 + ], + "type": "image", + "image_path": "6c35e4c35a8fbc4b9bb884248ec42755727f5cb1a53b8128d768384367b79683.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 303, + 211, + 315 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 303, + 211, + 315 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 211, + 315 + ], + "type": "text", + "content": "3.2.2 Data generation" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 316, + 506, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 316, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 316, + 506, + 449 + ], + "type": "text", + "content": "We follow the same procedure as CountDown to 
generate training data for both the backtracking and direct solution models. We use a DFS-based search algorithm, in combination with a Sudoku solver that applies seven common human strategies (e.g., naked singles, hidden pairs and etc, Papadimas. (2023)) to eliminate candidates for unfilled cells. At each node, we use the 7 strategies to eliminate candidates for unfilled cells, and then DFS chooses an unfilled cell, makes a guess and continues solving recursively. This process continues until the board is either solved or reaches a dead-end (i.e., an invalid state with no legal moves). We use a dataset of 3M Sudoku puzzles from (Radcliffe, 2020), and the combined DFS-solver approach successfully solves " + }, + { + "bbox": [ + 104, + 316, + 506, + 449 + ], + "type": "inline_equation", + "content": "98\\%" + }, + { + "bbox": [ + 104, + 316, + 506, + 449 + ], + "type": "text", + "content": " of them. Since DFS successfully solves nearly all puzzles, we train both models on 2.8M examples and reserve the last 200K for validation and testing. We provide further details on Sudoku training data generation in Appendix C.1 and data examples in Appendix H." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 456, + 220, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 220, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 220, + 468 + ], + "type": "text", + "content": "3.3 Model and training" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 470, + 506, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 470, + 506, + 571 + ], + "spans": [ + { + "bbox": [ + 104, + 470, + 506, + 571 + ], + "type": "text", + "content": "We use Qwen2.5-style model architectures (Yang et al., 2024) with RoPE positional encoding (Su et al., 2021) and Group Query Attention (GQA) (Ainslie et al., 2023). 
To maximize parameter efficiency, we design custom tokenizers for both games, significantly reducing the size of the language modeling head. This allows us to train smaller models than prior work (Gandhi et al., 2024; Shah et al., 2024) while maintaining comparable performance on both tasks. For CountDown, we use a 17M parameter model with a context length of 4096 tokens; for Selenium, we use a 38M model with the same context length. See Appendix C.3 for model architecture and an exhaustive list of training hyperparameters. We train all models until validation loss converges (see Appendix G.3)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 580, + 324, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 324, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 324, + 594 + ], + "type": "text", + "content": "4 Empirical trade-offs of backtracking" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 598, + 504, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 504, + 644 + ], + "type": "text", + "content": "We first demonstrate that backtracking models do not universally outperform the direct solution models (Section 4.1) because backtracking models are restricted to learn a prescribed way of search (Section 4.2). We then identify two factors (Sections 4.3) showing how we might improve test-time scaling for backtracking models." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 651, + 299, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 651, + 299, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 651, + 299, + 662 + ], + "type": "text", + "content": "4.1 Backtracking is not always beneficial" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 505, + 733 + ], + "type": "text", + "content": "Evaluation metrics. We evaluate model performances using solving accuracy on 200 unseen problems with binary scores (either correct or incorrect, no partial credits, see appendix C.1). We use FLOPs to compare inference costs (see Appendix D for FLOPs computation). For the backtracking model, we allow models to autoregressively generate and measure how many problems the model finds the correct solution at various CoT lengths (ranging from 1024 to 4096 tokens). 
For the direct solution model, we generate" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "text", + "content": " solutions in parallel through temperature sampling at " + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "inline_equation", + "content": "T = 0.7" + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "text", + "content": ", and examine whether the model has found the correct solution within " + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "text", + "content": " attempts (i.e., best-of-" + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "text", + "content": "). 
Best-of-" + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 82, + 506, + 170 + ], + "type": "text", + "content": " is a suitable choice in those two games, a case where solving the task is hard but verification is trivial. In general, our analysis applies to tasks where verification can be easily done with an external verifier at test-time. This is definitely not always the case, and we leave the study of problems where test-time verification is not as easy to future work. In those tasks, one might need to consider majority voting or other strategies. See Appendix E for further discussions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 176, + 506, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 176, + 506, + 332 + ], + "spans": [ + { + "bbox": [ + 104, + 176, + 506, + 332 + ], + "type": "text", + "content": "Results. In Fig. 1 A, we observe distinct scaling behaviors for the two models. For both games, the direct solution model's test accuracy scales linearly with increased test-time compute (measured on a logarithmic scale). This scaling behavior indicates that through parallel sampling, the backtracking model generates diverse solutions that search through different solution paths. Conversely, the backtracking model exhibits sub-linear scaling: Longer solution traces disproportionately yield smaller accuracy improvements. We attribute the sub-linear scaling to two causes. First, as reasoning chains become longer, the backtracking model might struggle to effectively track visited states and efficiently search through the solution space. Second, when models perform sequential search, the computation cost grows quadratically with CoT length (due to the attention mechanism, see Appendix D), and this further makes backtracking model less effective for scaling up test time compute. 
Overall, for CountDown, the direct solution model consistently outperforms its backtracking counterpart. However, this trend is reversed in Selenium, where the backtracking model consistently achieves higher accuracy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 339, + 373, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 339, + 373, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 339, + 373, + 350 + ], + "type": "text", + "content": "4.2 Backtracking model learns both the good and the bad" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 354, + 506, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 506, + 488 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 506, + 488 + ], + "type": "text", + "content": "When teaching a child to correct math mistakes, the child understands that the goal is the correct answer—not making and then fixing errors. Humans have meta-cognitive awareness that models lack. Models trained via next-token prediction simply imitate the traces they see, including making the mistake before fixing it. In CountDown, this poses a key limitation: the backtracking model learns to follow the specific search paths seen in training. While some tasks—like shortest path finding—have optimal strategies we can supervise directly (e.g., Dijkstra's algorithm), most reasoning tasks, including CountDown, lack such guarantees. As a result, the model may be constrained by the inefficiencies in the backtracking data. In contrast, the direct solution model, trained only on correct answers, is free to discover more efficient strategies. In our subsequent analysis, we concretely show how the direct solution model successfully bypasses many inefficient search and backtracking steps learned by the backtracking model." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 492, + 400, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 400, + 504 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 400, + 504 + ], + "type": "text", + "content": "4.2.1 Backtracking model finds the solution with fewer mistakes" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 505, + 504, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 505, + 504, + 563 + ], + "spans": [ + { + "bbox": [ + 104, + 505, + 504, + 563 + ], + "type": "text", + "content": "Measuring number of mistakes. We compare the number of mistakes made by: (1) DFS (used to generate backtracking data), (2) the backtracking model, and (3) the direct solution model. For DFS and the backtracking model, mistakes are counted as the number of incorrect terminal nodes explored before finding the correct solution. For the direct solution model, mistakes correspond to how many parallel samples (" + }, + { + "bbox": [ + 104, + 505, + 504, + 563 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 505, + 504, + 563 + ], + "type": "text", + "content": " in best-of-" + }, + { + "bbox": [ + 104, + 505, + 504, + 563 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 505, + 504, + 563 + ], + "type": "text", + "content": ") are needed." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 569, + 506, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 569, + 506, + 714 + ], + "spans": [ + { + "bbox": [ + 104, + 569, + 506, + 714 + ], + "type": "text", + "content": "Comparing search strategies. We sort the 200 test problems based on mistakes made by DFS and plot mistakes for both models. Fig. 3 left compares DFS search and backtracking model. The number mistakes made by the backtracking model is correlated with the DFS backtracking trace. 
This observation is not surprising given that the backtracking model is trained on these traces. However, this result is interesting when we compare it against the direct solution model (Fig. 3 middle). The direct solution model solves most problems within fewer than 10 attempts—far fewer compared to DFS or the backtracking model. Fig. 3 right quantifies these observations. Specifically, for a fixed mistake budget, we use Jaccard Index to measure whether the model solves a similar set of problems as DFS solves. The backtracking model closely mirrors DFS search (high set similarity), whereas the direct solution model diverges significantly (low set similarity). Together with superior performance of the direct solution model, we conclude that the direct solution model learns more efficient search strategies, avoiding unnecessary explorations of wrong paths." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 720, + 403, + 732 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 720, + 403, + 732 + ], + "spans": [ + { + "bbox": [ + 116, + 720, + 403, + 732 + ], + "type": "text", + "content": "1 Mistakes are counted only for problems solved correctly by the model." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 83, + 329, + 244 + ], + "blocks": [ + { + "bbox": [ + 109, + 83, + 329, + 244 + ], + "lines": [ + { + "bbox": [ + 109, + 83, + 329, + 244 + ], + "spans": [ + { + "bbox": [ + 109, + 83, + 329, + 244 + ], + "type": "image", + "image_path": "f744918c025c250a03c698c53abc9f6b439ada25c09467401c31889e902dd09a.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 255, + 506, + 313 + ], + "lines": [ + { + "bbox": [ + 104, + 255, + 506, + 313 + ], + "spans": [ + { + "bbox": [ + 104, + 255, + 506, + 313 + ], + "type": "text", + "content": "Figure 4: Two different variations to improve backtracking model. (a). We hypothesize that the backtracking model can think one step ahead without sacrificing its ability to search. Therefore, we shorten the search trace by skipping the last search step. (b). Two data variations that improve the backtracking model. Mixed-backtrack model trained on a diverse set of search strategies. Think-backtracking model trained on shortened DFS trace." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 310, + 85, + 506, + 242 + ], + "blocks": [ + { + "bbox": [ + 310, + 85, + 506, + 242 + ], + "lines": [ + { + "bbox": [ + 310, + 85, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 310, + 85, + 506, + 242 + ], + "type": "image", + "image_path": "64eea15ad12a2ff0fc19f7e2b733fda8065aa921d4af04dc08e790c09f82a37d.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 337, + 322, + 350 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 337, + 322, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 337, + 322, + 350 + ], + "type": "text", + "content": "4.3 Two ways to improve backtracking model" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "content": "Training on diverse set of search strategies. Our analysis suggests a clear direction for improving the backtracking model: using better search strategies to improve backtracking traces. Beyond DFS, we explored alternatives including Breadth-First Search (BFS) and various heuristic methods (see Appendix G.1). Despite these efforts, no single search strategy significantly outperformed DFS. Inspired by Gandhi et al. (2024), we trained a variant of the backtracking model—mix-backtrack model—using a diverse mixture of BFS and DFS strategies (32 in total), aiming to help the model discover more optimal search patterns." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 453, + 507, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 453, + 507, + 544 + ], + "spans": [ + { + "bbox": [ + 104, + 453, + 507, + 544 + ], + "type": "text", + "content": "Backtracking model thinks less and talks more. Apart from learning suboptimal search strategies, another inefficiency in the backtracking model is caused by the model learns to be excessively verbose. Specifically, by requiring the model to explicitly output every step of the DFS, we may prevent it from internalizing part of the reasoning process. Concretely, we hypothesize that for CountDown, the model can internally plan at least one step ahead, allowing it to shorten its explicit reasoning trace without losing its ability to perform DFS. To test hypothesis, we train a variation—the think-backtrack model—on shortened DFS traces, skipping one intermediate step (Fig. 4, A)." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 554, + 507, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 507, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 507, + 633 + ], + "type": "text", + "content": "Mix-strategy results. Fig. 4 (B) compares this mixed-strategy model against the original backtracking and direct solution models. We also include a training data upper bound, representing perfect execution of the mixed search strategies. The mixed-strategy model improves over the original backtracking model and closely approaches its training-data upper bound. However, even with deliberate attempts to optimize search strategies, surpassing the performance of the direct solution model remains challenging. This experiment underscores the inherent difficulty in identifying superior handcrafted search traces." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 507, + 734 + ], + "type": "text", + "content": "Think-backtrack results. Fig. 4 (B) also compares the performance of the think-backtrack model. By encouraging the model to internalize parts of the reasoning process, the think-backtrack model achieves performances comparable to the direct solution model. This result suggests that models with backtracking ability might produce long but ineffective CoT. By training the model to avoid making the mistakes at the first place, we reduce model morbidity without sacrificing its search capability, and in turn improving test-time-compute scaling. As an additional evidence, in Appendix G.2, we show that the think-backtrack model solves a superset of test problems solved by the original backtrack model." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 82, + 504, + 381 + ], + "blocks": [ + { + "bbox": [ + 107, + 82, + 504, + 381 + ], + "lines": [ + { + "bbox": [ + 107, + 82, + 504, + 381 + ], + "spans": [ + { + "bbox": [ + 107, + 82, + 504, + 381 + ], + "type": "image", + "image_path": 
"9b1b98b03adf16268a74f8dec7aaeb6508fe5869505875ad5daa40391ef2fe85.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 103, + 392, + 507, + 449 + ], + "lines": [ + { + "bbox": [ + 103, + 392, + 507, + 449 + ], + "spans": [ + { + "bbox": [ + 103, + 392, + 507, + 449 + ], + "type": "text", + "content": "Figure 5: Different scaling behaviors for backtracking versus direct solution model. CountDown (A). Backtracking model performance does not improve as we scale up model size. (B). The direct solution model improves (C). Direct solution model consistently outperforms backtracking model. Sodomu (D, E). Both models' performances improve as we scale up model size. (F). Direct solution model consistently underperforms backtracking model." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 469, + 463, + 484 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 469, + 463, + 484 + ], + "spans": [ + { + "bbox": [ + 104, + 469, + 463, + 484 + ], + "type": "text", + "content": "5 Model size and tree depth impact the efficacy of backtracking" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 489, + 506, + 557 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 489, + 506, + 557 + ], + "spans": [ + { + "bbox": [ + 104, + 489, + 506, + 557 + ], + "type": "text", + "content": "While we've shown that backtracking might lead to ineffective test-time scaling, other factors also shape its effectiveness. In Section 5.1, we show that backtracking and direct solution models scale differently with model sizes. To explain the contrasting outcomes (Fig. 1 A) between CountDown and Selenium, in Appendix F, we show that task differences—particularly search tree depth—play a key role: deeper tasks like Selenium benefit more from backtracking." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 565, + 254, + 577 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 565, + 254, + 577 + ], + "spans": [ + { + "bbox": [ + 105, + 565, + 254, + 577 + ], + "type": "text", + "content": "5.1 Dependence on model size" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 580, + 506, + 626 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 580, + 506, + 626 + ], + "spans": [ + { + "bbox": [ + 104, + 580, + 506, + 626 + ], + "type": "text", + "content": "We now investigate how model size impacts the performance of backtracking and direct solution models. We evaluate four model scales—3M, 17M, 38M, and 144M—by proportionally increasing the number of attention heads, embedding dimensions, and number of attention layers. Detailed model configurations can be found in Appendix C.2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 632, + 506, + 733 + ], + "type": "text", + "content": "CountDown. Scaling up model size improves the performance of the direct solution model (Fig. 5 B) across all test-time-compute budgets. When trained exclusively on correct solutions, larger models can independently discover highly effective search strategies. In contrast, the backtracking model shows no improvements with increased model sizes (Fig. 5 A). The lack of improvement from model scaling can be explained by training data: The performance of backtracking model is constrained by the quality of the backtracking traces used for training. As previously seen in Fig. 4 (right), the 17M backtracking model is already approaching the performance ceiling that is set by the training data. Training larger models on the same backtracking data would not lead to further performance improvements. 
Due" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "to different scaling behaviors between backtracking and direct solution models, the gap in performances between two types of models widens with increasing model sizes (Fig. 5 C)." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 111, + 506, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 111, + 506, + 234 + ], + "spans": [ + { + "bbox": [ + 104, + 111, + 506, + 234 + ], + "type": "text", + "content": "Sudoku. Similar to CountDown, the performances of direct solution models improve with increased model sizes (Fig. 5 E). Unlike CountDown, however, the backtracking model also significantly benefits from scaling (Fig. 5 D). This difference can again be explained by examining the backtracking training data. Sodomu is inherently more complex than CountDown. 
The DFS backtracking traces successfully solve " + }, + { + "bbox": [ + 104, + 111, + 506, + 234 + ], + "type": "inline_equation", + "content": "97\\%" + }, + { + "bbox": [ + 104, + 111, + 506, + 234 + ], + "type": "text", + "content": " of test boards—far exceeding the current performance of all four tested model sizes. Because the backtracking model for Sodomu has not yet reached training data performance ceiling, increased model capacity leads to improved results. On the other hand, due to the complexity and large search space of the game, the backtracking models' performance gains start to diminish as the search traces become longer. As a result, the backtracking model consistently outperforms the direct solution model across scales, but the advantages diminishes at larger compute budgets (Fig. 5 E)." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 244, + 369, + 259 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 244, + 369, + 259 + ], + "spans": [ + { + "bbox": [ + 104, + 244, + 369, + 259 + ], + "type": "text", + "content": "6 GRPO: Learning beyond the imitation game" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 264, + 507, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 264, + 507, + 300 + ], + "spans": [ + { + "bbox": [ + 104, + 264, + 507, + 300 + ], + "type": "text", + "content": "So far, we have shown that under supervised learning, backtracking is not always optimal for scaling test-time compute. We now explore how further training both backtracking and direct solution models with reinforcement learning leads to qualitatively different outcomes." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 306, + 304, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 306, + 304, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 306, + 304, + 319 + ], + "type": "text", + "content": "6.1 Continue training models with GRPO" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 321, + 506, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 321, + 506, + 433 + ], + "spans": [ + { + "bbox": [ + 104, + 321, + 506, + 433 + ], + "type": "text", + "content": "Recently, RL has become a popular approach to further enhance LLMs performance on challenging benchmarks such as MATH (Hendrycks et al., 2021) and AIME (AIME, 2024). Here, we study the effects of RL in a controlled setting, focusing on how it impacts a model's backtracking behaviors (sequential search) and as well as a model's parallel search capability (sampling with best-of- " + }, + { + "bbox": [ + 104, + 321, + 506, + 433 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 321, + 506, + 433 + ], + "type": "text", + "content": "). We take the CountDown backtracking and direct solution models, which have been trained to convergence under the supervised learning objective (see Appendix G.3 for training curves). We then continue training each model using GRPO (Shao et al., 2024), following verl's (Sheng et al., 2024) implementation. We perform GRPO on the same training data used for the supervised learning. As before, we evaluate performance across different test-time compute budgets." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 441, + 366, + 453 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 366, + 453 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 366, + 453 + ], + "type": "text", + "content": "6.2 Backtracking model discovers new search strategies" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 455, + 506, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 455, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 104, + 455, + 506, + 534 + ], + "type": "text", + "content": "Figure 1 C shows that the backtracking model post GRPO sees an performance boost across all test-compute budgets. The post-GRPO model (dark red) reaches an accuracy comparable to the pre-GRPO direct solution model (light blue). This improvement is surprising for two reasons: (1) at maximum compute (4096 tokens), the model solves nearly " + }, + { + "bbox": [ + 104, + 455, + 506, + 534 + ], + "type": "inline_equation", + "content": "70\\%" + }, + { + "bbox": [ + 104, + 455, + 506, + 534 + ], + "type": "text", + "content": " of the test set—exceeding the performance of the DFS strategy used to generate training data " + }, + { + "bbox": [ + 104, + 455, + 506, + 534 + ], + "type": "inline_equation", + "content": "(57\\%)" + }, + { + "bbox": [ + 104, + 455, + 506, + 534 + ], + "type": "text", + "content": "; and (2) the model was trained on questions it has already seen during supervised learning, with no new problems introduced during GRPO." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 542, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 506, + 654 + ], + "type": "text", + "content": "These gains suggest that the backtracking model, once freed from the constraints of predicting next token on DFS traces, can now discover better search strategies. 
To concretely show that the backtracking model post-GRPO learns search strategies different from DFS training traces, we revisit the mistake-counting analysis from Section 4.2.1 (Figure 3). For each test problem, we compute the number of mistakes as before (i.e., counting how many incorrect terminal nodes are explored before reaching a correct solution). Using the same set similarity measure as before, we quantify the strategy deviation in Figure 6 (left). The smaller Jaccard index values confirm that through GRPO, the backtracking model has learned new and more effective search behaviors. In Appendix G.4, we also show the per-problem scatter plot as done in Figure 3." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 661, + 328, + 673 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 661, + 328, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 661, + 328, + 673 + ], + "type": "text", + "content": "6.3 Direct solution model specializes at pass@1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 676, + 505, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 505, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 505, + 734 + ], + "type": "text", + "content": "We now show that compared to backtracking models, GRPO has remarkably different effects on direct solution models. As shown in Figure 1 C, the direct solution model post-GRPO achieves strong performance at the smallest compute budget (pass@1), solving " + }, + { + "bbox": [ + 104, + 676, + 505, + 734 + ], + "type": "inline_equation", + "content": "42.5\\%" + }, + { + "bbox": [ + 104, + 676, + 505, + 734 + ], + "type": "text", + "content": " of unseen CountDown puzzles (82 out of 200 test problems). None of the handcrafted search strategies (Appendix G.1) can reach such high accuracy. 
To understand the impressive gain" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 79, + 312, + 209 + ], + "blocks": [ + { + "bbox": [ + 109, + 79, + 312, + 209 + ], + "lines": [ + { + "bbox": [ + 109, + 79, + 312, + 209 + ], + "spans": [ + { + "bbox": [ + 109, + 79, + 312, + 209 + ], + "type": "image", + "image_path": "74d0920a8d16477d6e33f418d0398f280da866ebfc2cffcf91324a04375b76a2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 219, + 504, + 266 + ], + "lines": [ + { + "bbox": [ + 104, + 219, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 104, + 219, + 504, + 266 + ], + "type": "text", + "content": "Figure 6: GRPO has different effect on backtracking versus direct solution model Left: After GRPO, the backtracking model's search strategy starts to deviate away from the DFS search. Right: For problems the pre-GRPO direct solution model (blue) have a non-zero pass@k solving probabilities, the post-GRPO direct solution model (red) solves with pass@1." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 334, + 80, + 501, + 209 + ], + "blocks": [ + { + "bbox": [ + 334, + 80, + 501, + 209 + ], + "lines": [ + { + "bbox": [ + 334, + 80, + 501, + 209 + ], + "spans": [ + { + "bbox": [ + 334, + 80, + 501, + 209 + ], + "type": "image", + "image_path": "58fd3a6281a1bb9dfa15bece1a73c9939603a56ca0f044b45ec84822d61fe0f3.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 285, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 506, + 365 + ], + "type": "text", + "content": "on 1-shot performance, we examine those 82 problems, and discover that the pre-GRPO direct solution model was able to find correct solution by sampling best-of-" + }, + { + "bbox": [ + 104, + 285, + 506, + 365 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 285, + 506, + 365 + ], + "type": "text", + "content": " (with " + }, + { + "bbox": [ + 104, + 285, + 506, + 365 + ], + "type": "inline_equation", + "content": "n \\leq 64" + }, + { + "bbox": [ + 104, + 285, + 506, + 365 + ], + "type": "text", + "content": "). We now examine a model's solving probabilities (i.e., measuring pass@k rate out of the 64 generations). We compare the pass@k rate for the diret solution model pre and post GRPO, shown in Figure 6, right. We rank the 200 test problems by the pre-GRPO model's solving probabilities. For problems that the pre-GRPO model has a non-zero pass@k rate, the post-GRPO model can solve most of them with pass@1." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 372, + 504, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 504, + 441 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 504, + 441 + ], + "type": "text", + "content": "However, this improvement in 1-shot performance comes with a substantial trade-off: the model loses its ability to generate diverse solutions. As a result, when we perform parallel search using best-of-" + }, + { + "bbox": [ + 104, + 372, + 504, + 441 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 372, + 504, + 441 + ], + "type": "text", + "content": ", the direct solution model post-GRPO fail to explore different solution paths, hurting its test-time-scaling effectiveness. Therefore, test-time compute scaling becomes ineffective as we increase compute budgets, forming a sharp contrast to the backtracking model's consistent improvements across the full compute budget." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 449, + 278, + 462 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 278, + 462 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 278, + 462 + ], + "type": "text", + "content": "7 Conclusion and discussions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 468, + 506, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 468, + 506, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 468, + 506, + 568 + ], + "type": "text", + "content": "In this work, we conducted a controlled empirical investigation into the efficacy of teaching backtracking to large language models (LLMs) as a method for scaling test-time computation. Using two strategic games, CountDown and Sudoku, we demonstrated that backtracking does not universally outperform parallel solution strategies; rather, its effectiveness depends significantly on task characteristics, model scale, and training approach. 
Appendix A, we show that our results in synthetic setting generalize: even in real-world reasoning tasks, backtracking is not always beneficial. Additionally, our reinforcement learning experiments uncovered a unique synergy between backtracking capabilities and RL-based training, enabling models to discover novel strategies." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 574, + 506, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 574, + 506, + 696 + ], + "spans": [ + { + "bbox": [ + 104, + 574, + 506, + 696 + ], + "type": "text", + "content": "Limitations and future work. While our experiments relied on two strategic games (Count-Down and Sudo) and models trained from scratch—common practices for controlled studies—an important avenue for future research is extending our findings to complex, real-world reasoning tasks such as coding and mathematical problem-solving. For future work, developing precise ways to characterize tasks that benefit from backtracking will be valuable for guiding model training. Finally, while we intentionally created a dichotomy between pure backtracking and direct-solution models, real-world applications may require hybrid strategies that dynamically choose between direct generation and explicit search based on problem complexity. Investigating whether LLMs can autonomously optimize their reasoning modes, particularly through reinforcement learning paradigms, is a promising future direction for improving the flexibility and efficiency of model reasoning." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 212, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 212, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 212, + 95 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 99, + 506, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 99, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 104, + 99, + 506, + 146 + ], + "type": "text", + "content": "We thank Core Francisco Park and Bingbin Liu for helpful discussions and feedback throughout the development of this work. TQ and DAM acknowledge support from the Kempner Institute, the Aramont Fellowship Fund, and the FAS Dean's Competitive Fund for Promising Scholarship." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 156, + 168, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 168, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 168, + 168 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 177, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 106, + 177, + 505, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 177, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 505, + 200 + ], + "type": "text", + "content": "AIME. American invitational mathematics examination, 2024. URL https://maa.org/math-competitions/american-invitational-mathematics-examination-aime." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 208, + 506, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 208, + 506, + 242 + ], + "spans": [ + { + "bbox": [ + 106, + 208, + 506, + 242 + ], + "type": "text", + "content": "Joshua Ainslie, James Lee-Thorp, Michiel de Jong, Yury Zemlyanskiy, Federico Lebrón, and Sumit Sanghai. GQA: Training generalized multi-query transformer models from multi-head checkpoints. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.13245." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 248, + 506, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 248, + 506, + 293 + ], + "spans": [ + { + "bbox": [ + 106, + 248, + 506, + 293 + ], + "type": "text", + "content": "Afra Feyza Akyurek, Ekin Akyurek, Aman Madaan, Ashwin Kalyan, Peter Clark, Derry Wijaya, and Niket Tandon. RL4F: Generating natural language feedback with reinforcement learning for repairing model outputs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.08844." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 300, + 506, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 300, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 300, + 506, + 335 + ], + "type": "text", + "content": "Zachary Ankner, Mansheej Paul, Brandon Cui, Jonathan D Chang, and Prithviraj Ammanabrolu. Critique-out-loud reward models. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2408.11791." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 342, + 505, + 376 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 342, + 505, + 376 + ], + "spans": [ + { + "bbox": [ + 106, + 342, + 505, + 376 + ], + "type": "text", + "content": "Bradley Brown, Jordan Juravsky, Ryan Ehrlich, Ronald Clark, Quoc V Le, Christopher Ré, and Azalia Mirhoseini. Large language monkeys: Scaling inference compute with repeated sampling. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.21787." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 383, + 506, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 383, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 106, + 383, + 506, + 417 + ], + "type": "text", + "content": "Sehyun Choi, Tianqing Fang, Zhaowei Wang, and Yangqiu Song. KCTS: Knowledge-constrained tree search decoding with token-level hallucination detection. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2310.09044." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 423, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 423, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 423, + 506, + 732 + ], + "type": "text", + "content": "DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z F Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J L Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R J Chen, R L Jin, Ruyi Chen, Shanghai Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, S S Li, Shuang Zhou, Shaoqing Wu, Shengfeng Ye, Tao Yun, Tian Pei, Tianyu Sun, T Wang, Wangding Zeng, Wanjia Zhao, Wen Liu, Wenfeng Liang, Wenjun Gao, Wenqin Yu, Wentao Zhang, W L Xiao, Wei An, Xiaodong Liu, Xiaohan Wang, Xiaokang Chen, Xiaotao Nie, Xin Cheng, Xin Liu, Xin Xie, Xingchao Liu, Xinyu Yang, Xinyuan Li, Xuecheng Su, Xuheng Lin, X Q Li, Xiangyue Jin, Xiaojin Shen, Xiaosha Chen, Xiaowen Sun, Xiaoxiang Wang, Xinnan Song, Xinyi Zhou, Xianzu Wang, Xinxia Shan, Y K Li, Y Q Wang, Y X Wei, Yang Zhang, Yanhong Xu, Yao Li, Yao 
Zhao, Yaofeng Sun, Yaohui Wang, Yi Yu, Yichao Zhang, Yifan Shi, Yiliang Xiong. Ying He,Yishi Piao,Yisong Wang,Yixuan Tan,Yiyang Ma,Yiyuan Liu,Yongqiang Guo. Yuan OuYuduan Wang Yue Gong.Yuheng ZouYujia HeYunfan XiongYuxiang Luo. Yuxiang YouYuxuan LiuYuyang ZhouYX ZhuYanhong Xu,Yanping Huang,Yaohui Li,Yi Zheng,Yuchen Zhu,Yunxian Ma,Ying Tang,Yukun Zha,Yuting YanZ Z Ren Zehui Ren,Zhangli Sha,Zhe Fu,Zhean XuZhenda XieZhengyan Zhang,Zhenwen Hao. Zhicheng Ma,Zhigang Yan,Zhiyu WuZihui GuZijia ZhuZijun LiuZilin LiZiwei Xie" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 115, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 115, + 81, + 506, + 116 + ], + "type": "text", + "content": "Ziyang Song, Zizheng Pan, Zhen Huang, Zhipeng Xu, Zhongyu Zhang, and Zhen Zhang. DeepSeek-R1: Incentivizing reasoning capability in LLMs via reinforcement learning. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.12948." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 122, + 506, + 157 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 122, + 506, + 157 + ], + "spans": [ + { + "bbox": [ + 107, + 122, + 506, + 157 + ], + "type": "text", + "content": "Kanishk Gandhi, Denise Lee, Gabriel Grand, Muxin Liu, Winson Cheng, Archit Sharma, and Noah D Goodman. Stream of search (SoS): Learning to search in language. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2404.03683." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 163, + 506, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 163, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 163, + 506, + 198 + ], + "type": "text", + "content": "Kanishk Gandhi, Ayush Chakravarthy, Anikait Singh, Nathan Lile, and Noah D Goodman. Cognitive behaviors that enable self-improving reasoners, or, four habits of highly effective STaRs. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2503.01307." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 205, + 506, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 506, + 250 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 506, + 250 + ], + "type": "text", + "content": "Alex Havrilla, Sharath Raparthy, Christoforus Nalmpantis, Jane Dwivedi-Yu, Maksym Zhuravinskyi, Eric Hambro, and Roberta Raileanu. GLoRe: When, where, and how to improve LLM reasoning via global and local refinements. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.10963." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 256, + 506, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 256, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 256, + 506, + 291 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Saurav Kadavath, Akul Arora, Steven Basart, Eric Tang, Dawn Song, and Jacob Steinhardt. 
Measuring mathematical problem solving with the MATH dataset. arXiv [cs.LG], 2021. URL http://arxiv.org/abs/2103.03874." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 297, + 504, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 297, + 504, + 332 + ], + "spans": [ + { + "bbox": [ + 105, + 297, + 504, + 332 + ], + "type": "text", + "content": "Zhenyu Hou, Xin Lv, Rui Lu, Jiajie Zhang, Yujiang Li, Zijun Yao, Juanzi Li, Jie Tang, and Yuxiao Dong. Advancing language model reasoning through reinforcement learning and inference scaling. arXiv [cs.LG], 2025. URL http://arxiv.org/abs/2501.11651." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 338, + 506, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 338, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 106, + 338, + 506, + 393 + ], + "type": "text", + "content": "Robert Irvine, Douglas Boubert, Vyas Raina, Adrian Liusie, Ziyi Zhu, Vineet Mudupalli, Aliaksei Korshuk, Zongyi Liu, Fritz Cremer, Valentin Assassi, Christie-Carol Beauchamp, Xiaoding Lu, Thomas Rialan, and William Beauchamp. Rewarding chatbots for real-world engagement with millions of users. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2303.06135." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 400, + 506, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 400, + 506, + 445 + ], + "spans": [ + { + "bbox": [ + 106, + 400, + 506, + 445 + ], + "type": "text", + "content": "Amirhossein Kazemnejad, Milad Aghajohari, Eva Portelance, Alessandro Sordoni, Siva Reddy, Aaron Courville, and Nicolas Le Roux. VinePPO: Unlocking RL potential for LLM reasoning through refined credit assignment. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2410.01679." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 453, + 506, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 453, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 106, + 453, + 506, + 509 + ], + "type": "text", + "content": "Aviral Kumar, Vincent Zhuang, Rishabh Agarwal, Yi Su, John D Co-Reyes, Avi Singh, Kate Baumli, Shariq Iqbal, Colton Bishop, Rebecca Roelofs, Lei M Zhang, Kay McKinney, Disha Shrivastava, Cosmin Paduraru, George Tucker, Doina Precup, Feryal Behbahani, and Aleksandra Faust. Training language models to self-correct via reinforcement learning. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.12917." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 515, + 504, + 539 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 504, + 539 + ], + "type": "text", + "content": "Ariel N Lee, Cole J Hunter, and Nataniel Ruiz. Platypus: Quick, cheap, and powerful refinement of LLMs. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.07317." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 544, + 506, + 579 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 544, + 506, + 579 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 506, + 579 + ], + "type": "text", + "content": "Kuang-Huei Lee, Ian Fischer, Yueh-Hua Wu, Dave Marwood, Shumeet Baluja, Dale Schuurmans, and Xinyun Chen. Evolving deeper LLM thinking. arXiv [cs.AI], 2025. URL http://arxiv.org/abs/2501.09891." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 586, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 586, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 106, + 586, + 506, + 609 + ], + "type": "text", + "content": "Noam Levi. A simple model of inference scaling laws. arXiv [stat.ML], 2024. URL http://arxiv.org/abs/2410.16377." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 616, + 506, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 616, + 506, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 506, + 651 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. Let's verify step by step. arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2305.20050." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 657, + 504, + 701 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 657, + 504, + 701 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 504, + 701 + ], + "type": "text", + "content": "Jiacheng Liu, Andrew Cohen, Ramakanth Pasunuru, Yejin Choi, Hannaneh Hajishirzi, and Asli Celikyilmaz. Don't throw away your value model! generating more preferable text with value-guided monte-carlo tree search decoding. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2309.15028." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 708, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 504, + 731 + ], + "type": "text", + "content": "Jieyi Long. Large language model guided tree-of-thought. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2305.08291." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 733 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 128 + ], + "type": "text", + "content": "Liangchen Luo, Yinxiao Liu, Rosanne Liu, Samrat Phatale, Meiqi Guo, Harsh Lara, Yunxuan Li, Lei Shu, Yun Zhu, Lei Meng, Jiao Sun, and Abhinav Rastogi. Improve mathematical reasoning in language models by automated process supervision. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2406.06592." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 133, + 506, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 133, + 506, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 133, + 506, + 170 + ], + "type": "text", + "content": "Niklas Muennighoff, Zitong Yang, Weijia Shi, Xiang Lisa Li, Li Fei-Fei, Hannaneh Hajishirzi, Luke Zettlemoyer, Percy Liang, Emmanuel Candès, and Tatsunori Hashimoto. s1: Simple test-time scaling. arXiv [cs.CL], 2025. URL http://arxiv.org/abs/2501.19393." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 175, + 504, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 175, + 504, + 199 + ], + "spans": [ + { + "bbox": [ + 105, + 175, + 504, + 199 + ], + "type": "text", + "content": "Aleksei Maslakov And Papadimas. Sudoku solver with step-by-step guidance, 2023. URL https://github.com/unmade/dokusan, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 206, + 506, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 206, + 506, + 241 + ], + "spans": [ + { + "bbox": [ + 105, + 206, + 506, + 241 + ], + "type": "text", + "content": "Debjit Paul, Mete Ismayilzada, Maxime Peyrard, Beatrix Borges, Antoine Bosselut, Robert West, and Boi Faltings. REFINER: Reasoning feedback on intermediate representations. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2304.01904." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 247, + 506, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 247, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 247, + 506, + 282 + ], + "type": "text", + "content": "Yuxiao Qu, Tianjun Zhang, Naman Garg, and Aviral Kumar. Recursive IntroSpEction: Teaching language model agents how to self-improve. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2407.18219." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 289, + 506, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 506, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 506, + 314 + ], + "type": "text", + "content": "David Radcliffe. 3 million crossword puzzles with ratings. Website, 2020. URL https://www.kaggle.com/datasets/radcliffe/3-million-sudoku-puzzles-with-ratings." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 320, + 504, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 320, + 504, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 320, + 504, + 355 + ], + "type": "text", + "content": "William Saunders, Catherine Yeh, Jeff Wu, Steven Bills, Long Ouyang, Jonathan Ward, and Jan Leike. Self-critiquing models for assisting human evaluators. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2206.05802." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 361, + 504, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 361, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 361, + 504, + 397 + ], + "type": "text", + "content": "Kulin Shah, Nishanth Dikkala, Xin Wang, and Rina Panigrahy. Causal language modeling can elicit search and reasoning capabilities on logic puzzles. arXiv [cs.LG], 2024. URL http://arxiv.org/abs/2409.10502." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 403, + 504, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 403, + 504, + 438 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 504, + 438 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Mingchuan Zhang, Y K Li, Y Wu, and Daya Guo. DeepSeekMath: Pushing the limits of mathematical reasoning in open language models. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2402.03300." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 445, + 506, + 480 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 506, + 480 + ], + "type": "text", + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. HybridFlow: A flexible and efficient RLHF framework. arXiv preprint arXiv: 2409.19256, 2024." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 487, + 506, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 506, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 506, + 521 + ], + "type": "text", + "content": "Jianlin Su, Yu Lu, Shengfeng Pan, Ahmed Murtadha, Bo Wen, and Yunfeng Liu. RoFormer: Enhanced transformer with rotary position embedding. arXiv [cs.CL], 2021. URL http://arxiv.org/abs/2104.09864." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 529, + 506, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 506, + 595 + ], + "type": "text", + "content": "Peiyi Wang, Lei Li, Zhihong Shao, Runxin Xu, Damai Dai, Yifei Li, Deli Chen, Yu Wu, and Zhifang Sui. Math-shepherd: Verify and reinforce LLMs step-by-step without human annotations. In Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pp. 9426-9439, Stroudsburg, PA, USA, 2024. Association for Computational Linguistics. URL http://dx.doi.org/10.18653/v1/2024.acl-long.510." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 603, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 603, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 603, + 506, + 649 + ], + "type": "text", + "content": "Tianlu Wang, Ping Yu, Xiaoqing Ellen Tan, Sean O'Brien, Ramakanth Pasunuru, Jane Dwivedi-Yu, Olga Golovneva, Luke Zettlemoyer, Maryam Fazel-Zarandi, and Asli Celikyilmaz. Shepherd: A critic for language model generation. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2308.04592." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 655, + 504, + 690 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 655, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 655, + 504, + 690 + ], + "type": "text", + "content": "Sean Welleck, Ximing Lu, Peter West, Faeze Brahman, Tianxiao Shen, Daniel Khashabi, and Yejin Choi. Generating sequences by learning to self-correct. arXiv [cs.CL], 2022. URL http://arxiv.org/abs/2211.00053." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 697, + 504, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 697, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 697, + 504, + 733 + ], + "type": "text", + "content": "Yangzhen Wu, Zhiqing Sun, Shanda Li, Sean Welleck, and Yiming Yang. Inference scaling laws: An empirical analysis of compute-optimal inference for problem-solving with language models. arXiv [cs.AI], 2024. URL http://arxiv.org/abs/2408.00724." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 506, + 549 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
107, + 81, + 506, + 116 + ], + "spans": [ + { + "bbox": [ + 107, + 81, + 506, + 116 + ], + "type": "text", + "content": "Yuxi Xie, Kenji Kawaguchi, Yiran Zhao, Xu Zhao, Min-Yen Kan, Junxian He, and Qizhe Xie. Self-evaluation guided beam search for reasoning. arXiv [cs.CL], 2023. URL http://arxiv.org/abs/2305.00633." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 122, + 506, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 506, + 167 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 506, + 167 + ], + "type": "text", + "content": "Huajian Xin, Daya Guo, Zhihong Shao, Zhizhou Ren, Qihao Zhu, Bo Liu, Chong Ruan, Wenda Li, and Xiaodan Liang. DeepSeek-prover: Advancing theorem proving in LLMs through large-scale synthetic data. arXiv [cs.AI], 2024. URL http://arxiv.org/abs/2405.14333." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 174, + 506, + 253 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 174, + 506, + 253 + ], + "spans": [ + { + "bbox": [ + 105, + 174, + 506, + 253 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, Huan Lin, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Yang, Jiaxi Yang, Jingren Zhou, Junyang Lin, Kai Dang, Keming Lu, Keqin Bao, Kexin Yang, Le Yu, Mei Li, Mingfeng Xue, Pei Zhang, Qin Zhu, Rui Men, Runji Lin, Tianhao Li, Tingyu Xia, Xingzhang Ren, Xuancheng Ren, Yang Fan, Yang Su, Yichang Zhang, Yu Wan, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zihan Qiu. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 259, + 504, + 293 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 259, + 504, + 293 + ], + "spans": [ + { + "bbox": [ + 105, + 259, + 504, + 293 + ], + "type": "text", + "content": "Shunyu Yao, Dian Yu, Jeffrey Zhao, Izhak Shafran, Thomas L Griffiths, Yuan Cao, and Karthik Narasimhan. Tree of thoughts: Deliberate problem solving with large language models. arXiv [cs.CL], 2023a. URL http://arxiv.org/abs/2305.10601." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 300, + 506, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 300, + 506, + 355 + ], + "spans": [ + { + "bbox": [ + 105, + 300, + 506, + 355 + ], + "type": "text", + "content": "Weiran Yao, Shelby Heinecke, Juan Carlos Niebles, Zhiwei Liu, Yihao Feng, Le Xue, Rithesh Murthy, Zeyuan Chen, Jianguo Zhang, Devansh Arpit, Ran Xu, Phil Mui, Huan Wang, Caiming Xiong, and Silvio Savarese. Retroformer: Retrospective large language agents with policy gradient optimization. arXiv [cs.CL], 2023b. URL http://arxiv.org/abs/2308.02151." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 362, + 504, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 362, + 504, + 397 + ], + "spans": [ + { + "bbox": [ + 105, + 362, + 504, + 397 + ], + "type": "text", + "content": "Seonghyeon Ye, Yongrae Jo, Doyoung Kim, Sungdong Kim, Hyeonbin Hwang, and Minjoon Seo. SelFee: Iterative self-revising LLM empowered by self-feedback generation. Blog post, 2023. URL https://kaistai.github.io/SelFee/." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 403, + 504, + 427 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 403, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 105, + 403, + 504, + 427 + ], + "type": "text", + "content": "Eric Zelikman, Yuhuai Wu, Jesse Mu, and Noah D Goodman. STaR: Bootstrapping reasoning with reasoning. 
arXiv [cs.LG], 2022. URL http://arxiv.org/abs/2203.14465." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 434, + 504, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 434, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 434, + 504, + 468 + ], + "type": "text", + "content": "Shun Zhang, Zhenfang Chen, Yikang Shen, Mingyu Ding, Joshua B Tenenbaum, and Chuang Gan. Planning with large language models for code generation. arXiv [cs.LG], 2023. URL http://arxiv.org/abs/2303.05510." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 474, + 506, + 509 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 474, + 506, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 474, + 506, + 509 + ], + "type": "text", + "content": "Yunxiang Zhang, Muhammad Khalifa, Lajanugen Logeswaran, Jaekyeom Kim, Moontae Lee, Honglak Lee, and Lu Wang. Small language models need strong verifiers to self-correct reasoning. arXiv [cs.CL], 2024. URL http://arxiv.org/abs/2404.17140." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 515, + 506, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 515, + 506, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 515, + 506, + 549 + ], + "type": "text", + "content": "Andy Zhou, Kai Yan, Michal Shlapentokh-Rothman, Haohan Wang, and Yu-Xiong Wang. Language agent tree search unifies reasoning acting and planning in language models. arXiv [cs.AI], 2023. URL http://arxiv.org/abs/2310.04406." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 130, + 93, + 296, + 219 + ], + "blocks": [ + { + "bbox": [ + 141, + 81, + 306, + 92 + ], + "lines": [ + { + "bbox": [ + 141, + 81, + 306, + 92 + ], + "spans": [ + { + "bbox": [ + 141, + 81, + 306, + 92 + ], + "type": "text", + "content": "Math-500 Performance vs. Test Compute" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 130, + 93, + 296, + 219 + ], + "lines": [ + { + "bbox": [ + 130, + 93, + 296, + 219 + ], + "spans": [ + { + "bbox": [ + 130, + 93, + 296, + 219 + ], + "type": "image", + "image_path": "c3231bcd3f7a4377e0bb50797ec0d78ce075ede1cd66eeda2dce0e11b6a62c45.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 310, + 92, + 476, + 218 + ], + "blocks": [ + { + "bbox": [ + 329, + 81, + 478, + 91 + ], + "lines": [ + { + "bbox": [ + 329, + 81, + 478, + 91 + ], + "spans": [ + { + "bbox": [ + 329, + 81, + 478, + 91 + ], + "type": "text", + "content": "GPQA Performance vs. 
Test Compute" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 310, + 92, + 476, + 218 + ], + "lines": [ + { + "bbox": [ + 310, + 92, + 476, + 218 + ], + "spans": [ + { + "bbox": [ + 310, + 92, + 476, + 218 + ], + "type": "image", + "image_path": "90573dfe71e77f9da3d2327c2e68b2211bc164de4f3632147e1b10f7612d2631.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 228, + 506, + 306 + ], + "lines": [ + { + "bbox": [ + 104, + 228, + 506, + 306 + ], + "spans": [ + { + "bbox": [ + 104, + 228, + 506, + 306 + ], + "type": "text", + "content": "Figure 7: Evaluating backtracking on real LLMs. Left: On MATH-500, we compare the S1 model (fine-tuned on backtracking traces) using sequential decoding with budget forcing, against its base model (Qwen2.5-32B-Instruct) using parallel sampling. The backtracking model underperforms at low compute but narrows the gap at higher budgets. Right: On GPQA, the same backtracking setup outperforms parallel sampling in a multiple-choice reasoning setting. This comparison generalizes our conclusion from synthetic settings to real LLMs." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 326, + 425, + 341 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 326, + 425, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 326, + 425, + 341 + ], + "type": "text", + "content": "A Backtracking Analysis on Math Reasoning with LLMs" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 346, + 224, + 359 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 224, + 359 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 224, + 359 + ], + "type": "text", + "content": "A.1 Experimental Setup" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 361, + 506, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 361, + 506, + 396 + ], + "spans": [ + { + "bbox": [ + 104, + 361, + 506, + 396 + ], + "type": "text", + "content": "To complement our synthetic experiments, we conduct an evaluation on real-world math problems to examine whether backtracking remains effective under equal test-time compute. We compare two approaches:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 405, + 504, + 452 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 108, + 405, + 504, + 427 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 405, + 504, + 427 + ], + "spans": [ + { + "bbox": [ + 108, + 405, + 504, + 427 + ], + "type": "text", + "content": "- Backtracking model: fine-tuned on solution traces that include explicit self-correction and step-by-step reflection." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 429, + 504, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 429, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 107, + 429, + 504, + 452 + ], + "type": "text", + "content": "- Direct solution model: the base model without backtracking fine-tuning, using parallel sampling (with majority voting for final correct answer) at inference." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 462, + 506, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 462, + 506, + 497 + ], + "spans": [ + { + "bbox": [ + 104, + 462, + 506, + 497 + ], + "type": "text", + "content": "To control test-time compute, we use the budget forcing technique introduced in (Muen-nighoff et al., 2025). This enables a fair comparison across models with differing reasoning styles." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 502, + 505, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 505, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 505, + 548 + ], + "type": "text", + "content": "Backtracking Model. We adopt the S1 checkpoint from Muennighoff et al. (2025), a model trained on solution traces distilled from DeepSeekR1. These traces exhibit explicit backtracking behaviors—identifying and correcting earlier mistakes. We generate outputs with temperature " + }, + { + "bbox": [ + 104, + 502, + 505, + 548 + ], + "type": "inline_equation", + "content": "T = 0.7" + }, + { + "bbox": [ + 104, + 502, + 505, + 548 + ], + "type": "text", + "content": " under budget forcing and evaluate on the MATH-500." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "text", + "content": "Direct Solution Model. For fair comparison, we use the same base model as S1—Qwen2.5-32B-Instruct—without backtracking fine-tuning. We sample " + }, + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "inline_equation", + "content": "N = 1" + }, + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "text", + "content": " to 8 completions with temperature " + }, + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "inline_equation", + "content": "T = 0.7" + }, + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "text", + "content": ", and report both Best-of-" + }, + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 554, + 507, + 589 + ], + "type": "text", + "content": " and Majority Vote accuracy." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 596, + 228, + 609 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 596, + 228, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 596, + 228, + 609 + ], + "type": "text", + "content": "Results and Interpretation" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 611, + 505, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 611, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 611, + 505, + 678 + ], + "type": "text", + "content": "Figure 7 (left) presents accuracy under matched compute budgets. We observe that at low compute budgets the backtracking model underperforms due to its verbose reasoning traces. At higher budgets, backtracking matches and slightly exceeds the performance of parallel sampling. 
This mirrors trends observed in the CountDown (Section 4.3), and suggests that while backtracking introduces overhead, it yields benefits when sufficient compute is available." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 687, + 506, + 733 + ], + "type": "text", + "content": "To form a sharp contrast, we reproduce results from (Muennighoff et al., 2025) on GPQA-Diamond (Figure 7, right), which shows that the same backtracking model significantly outperforms parallel sampling—even at lower budgets—in a multiple-choice setting. This contrast highlights that the effectiveness of backtracking is task-dependent." + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 140 + ], + "type": "text", + "content": "This real-world evaluation supports our synthetic findings: backtracking improves performance under compute constraints, but its advantage depends on the task structure. 
On open-ended math problems, the benefit is most pronounced at higher budgets. On structured tasks like multiple-choice QA, gains can appear even earlier. Overall, our conclusions generalize beyond synthetic settings." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 148, + 257, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 148, + 257, + 161 + ], + "spans": [ + { + "bbox": [ + 105, + 148, + 257, + 161 + ], + "type": "text", + "content": "B Related Work Extended" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 167, + 270, + 180 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 167, + 270, + 180 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 270, + 180 + ], + "type": "text", + "content": "B.1 Test-time computation scaling" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 182, + 506, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 182, + 506, + 260 + ], + "spans": [ + { + "bbox": [ + 104, + 182, + 506, + 260 + ], + "type": "text", + "content": "A growing body of work has explored how to improve language model performance by scaling test-time computation. These approaches typically fall into two broad categories: parallel and sequential search. Parallel methods sample multiple solutions independently and select the best one using predefined criteria—such as majority voting or external reward models—as seen in Best-of-" + }, + { + "bbox": [ + 104, + 182, + 506, + 260 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 182, + 506, + 260 + ], + "type": "text", + "content": " techniques (Brown et al., 2024; Irvine et al., 2023; Levi, 2024). These methods often rely on outcome-based reward models that score complete solutions (Xin et al., 2024; Ankner et al., 2024)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 269, + 506, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 506, + 360 + ], + "type": "text", + "content": "In contrast, sequential methods iteratively refine reasoning by conditioning on previous attempts. This class includes stepwise improvement methods(Ankner et al., 2024; Hou et al., 2025; Lee et al., 2025), where each new trajectory builds on earlier outputs, enabling the model to adapt its reasoning dynamically. Other research works have also explored using the search process itself to improve model reasoning capabilities, either during inference or by integrating the feedback into training(Wang et al., 2024; Luo et al., 2024). While these methods can reduce redundancy, they typically require more compute per sample and may suffer from compounding errors." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 366, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 506, + 456 + ], + "type": "text", + "content": "Tree-based approaches, such as Monte Carlo Tree Search (MCTS) and guided beam search, represent a hybrid between parallel and sequential strategies(Gandhi et al., 2024; Liu et al., 2023; Zhang et al., 2023; Zhou et al., 2023; Choi et al., 2023; Xie et al., 2023). These methods often leverage process reward models, which assign value to intermediate reasoning steps rather than full outputs(Lightman et al., 2023; Wang et al., 2024; Wu et al., 2024). REBASE(Wu et al., 2024), for example, uses a process reward model to guide exploration and pruning in tree search, and has been shown to outperform both sampling-based methods and traditional MCTS." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 463, + 281, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 463, + 281, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 463, + 281, + 475 + ], + "type": "text", + "content": "B.2 Self-correction and backtracking" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 477, + 507, + 621 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 477, + 507, + 621 + ], + "spans": [ + { + "bbox": [ + 104, + 477, + 507, + 621 + ], + "type": "text", + "content": "Search and backtracking are inherently tied to self-correction, as they enable models to revisit earlier decisions and recover from errors—a critical capability for multi-step reasoning. Teaching language models to self-correct has been approached through fine-tuning on revision demonstrations from humans or stronger models(Saunders et al., 2022; Ye et al., 2023; Qu et al., 2024), as well as through synthetic data generation and handcrafted augmentation(Paul et al., 2023; Wang et al., 2023; Lee et al., 2023). Reward-based methods provide another avenue, using outcome- or process-level signals to differentiate good and bad reasoning trajectories, often framed as implicit policy learning(Welleck et al., 2022; Akyurek et al., 2023; Zhang et al., 2024). Some methods further incorporate search, critique generation, or separate correction modules to enhance reasoning quality(Yao et al., 2023b; Havrilla et al., 2024). In contrast, using two structured games, we investigate the tradeoffs of teaching models to backtrack via search traces versus allowing them to learn purely from correct solutions." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 628, + 329, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 329, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 329, + 641 + ], + "type": "text", + "content": "B.3 Reinforcement learning for LLM reasoning" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 643, + 506, + 734 + ], + "type": "text", + "content": "Reinforcement learning (RL) has emerged as a powerful framework for improving the reasoning abilities of language models. While early work applied off-policy and on-policy RL methods to guide models toward verifiable outcomes(Zelikman et al., 2022; Kazemnejad et al., 2024), recent approaches have shown that even simplified algorithms like GRPO can lead to significant performance gains and the emergence of in-context search behavior (DeepSeek-AI et al., 2025; Shao et al., 2024; DeepSeek-AI et al., 2025). These advances suggest that RL can help models autonomously discover more effective reasoning strategies, even without explicit reward models or structured search. 
However, not all models benefit" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 751, + 311, + 761 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 128 + ], + "type": "text", + "content": "equally from RL, and it remains unclear what properties make a model amenable to learning through reinforcement. Our work contributes to this question by examining how backtracking models, when trained with GRPO, can discover novel solution strategies—while no-backtracking models show limited or mixed gains." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 139, + 233, + 153 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 139, + 233, + 153 + ], + "spans": [ + { + "bbox": [ + 104, + 139, + 233, + 153 + ], + "type": "text", + "content": "C Experiment details" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 159, + 332, + 172 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 332, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 332, + 172 + ], + "type": "text", + "content": "C.1 Additional details on game, data generation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "spans": [ + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "text", + "content": "CountDown tree size computation. CountDown has an exponentially growing search space with respect to the number of candidate numbers. If the current state has " + }, + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "text", + "content": " available numbers, there are " + }, + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "inline_equation", + "content": "\\binom{N}{2} \\times 4" + }, + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "text", + "content": " possible actions (selecting a pair and one of four operations), and the depth of the tree is " + }, + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "inline_equation", + "content": "N - 1" + }, + { + "bbox": [ + 104, + 175, + 506, + 233 + ], + "type": "text", + "content": ". For games with four candidate numbers, the complete search tree contains 1,152 nodes." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 241, + 504, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 241, + 504, + 331 + ], + "spans": [ + { + "bbox": [ + 104, + 241, + 504, + 331 + ], + "type": "text", + "content": "CountDown search. To generate DFS search data, we use a sum heuristic to guide the search order and prune nodes. This heuristic measures the distance between the sum of all input numbers and the target number, and prunes a node if the absolute distance exceeds the target. This approach is inspired by Gandhi et al. (2024), who also consider an alternative—the multiply heuristic—which measures the minimum distance between the input set and the factors of the target. However, in our experiments, both heuristics yield similar performance: for a fixed number of visited nodes, DFS with either heuristic solves approximately the same number of games." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 338, + 504, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 504, + 394 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 504, + 394 + ], + "type": "text", + "content": "Sudoku rule. In a Sudoku game, the player is given a " + }, + { + "bbox": [ + 104, + 338, + 504, + 394 + ], + "type": "inline_equation", + "content": "9 \\times 9" + }, + { + "bbox": [ + 104, + 338, + 504, + 394 + ], + "type": "text", + "content": " grid in which each cell must be filled with a digit from 1 to 9. The puzzle is subject to three constraints: each row, each column, and each of the nine " + }, + { + "bbox": [ + 104, + 338, + 504, + 394 + ], + "type": "inline_equation", + "content": "3 \\times 3" + }, + { + "bbox": [ + 104, + 338, + 504, + 394 + ], + "type": "text", + "content": " subgrids must contain all digits from 1 to 9 exactly once. Given a partially filled grid, the objective is to fill in the remaining cells such that all constraints are satisfied." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "content": "Sudoku data and tokenization. To represent theiku board for language models, we encode each cell as a position-value pair: " + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "inline_equation", + "content": "(x,y) = v" + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "inline_equation", + "content": "(x,y)" + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "content": " denotes the grid location and " + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "content": " is the cell's value. The model receives the initial board as a list of known " + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "inline_equation", + "content": "(x,y) = v" + }, + { + "bbox": [ + 104, + 402, + 506, + 469 + ], + "type": "text", + "content": " pairs and generates the solution by predicting the values for the remaining cells. We generate backtracking traces by serializing the full DFS traversal. For the direct solution model, we prune each trace to include only the final solution path." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 476, + 504, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 476, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 476, + 504, + 533 + ], + "type": "text", + "content": "Scoring. For CountDown, a solution is correct only if it adheres to game rules and correctly achieves the target number. 
For Selenium, correctness requires fully solving the board, with no partial credit given for incomplete but correct boards. Models are tested on 200 unseen problems per game. The same scoring function is used as the reward function in GRPO (Section 6)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 541, + 318, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 318, + 553 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 318, + 553 + ], + "type": "text", + "content": "C.2 Additional details on model architecture" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 557, + 321, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 557, + 321, + 570 + ], + "spans": [ + { + "bbox": [ + 104, + 557, + 321, + 570 + ], + "type": "text", + "content": "Model hyperparameters can be found in Table 1." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 115, + 582, + 496, + 651 + ], + "blocks": [ + { + "bbox": [ + 115, + 582, + 496, + 651 + ], + "lines": [ + { + "bbox": [ + 115, + 582, + 496, + 651 + ], + "spans": [ + { + "bbox": [ + 115, + 582, + 496, + 651 + ], + "type": "table", + "html": "
Model SizeHidden SizeLayersAttn HeadsIntermediate SizeKV Heads
3M256645121
17M5128410241
38M51210820482
144M102412830722
", + "image_path": "6d58a67cb14836a8528234812303ed4b872557173c0aaf09e6fd2050fe82eb1f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 658, + 504, + 681 + ], + "lines": [ + { + "bbox": [ + 104, + 658, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 504, + 681 + ], + "type": "text", + "content": "Table 1: Qwen2.5-style architecture configurations for the four model sizes used in our experiments." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 693, + 249, + 706 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 693, + 249, + 706 + ], + "spans": [ + { + "bbox": [ + 104, + 693, + 249, + 706 + ], + "type": "text", + "content": "C.3 Training hyperparameter" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 104, + 708, + 504, + 731 + ], + "type": "text", + "content": "Training hyperparameters can be found in Table 2. We train all models on 2 NVIDIA H100 80GB HBM3 GPUs." 
+ } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 194, + 80, + 416, + 270 + ], + "blocks": [ + { + "bbox": [ + 194, + 80, + 416, + 270 + ], + "lines": [ + { + "bbox": [ + 194, + 80, + 416, + 270 + ], + "spans": [ + { + "bbox": [ + 194, + 80, + 416, + 270 + ], + "type": "table", + "html": "
HyperparameterValue
Optimization
Learning rate1 × 10-5
Weight decay0.01
Learning Rate Schedule
Scheduler typeCosine
Warmup steps1
Training Setup
Epochs30
Batch size (backtracking model)32
Batch size (direct solution model)64
Context length (backtracking model)4096
Context length (direct solution model)512
Tokenizer
Tokenizer size (CountDown)74
Tokenizer size (Sudoku)110
", + "image_path": "69ac0e6e576a49a10af498ffd099c72ba54c2b238e83d3d324692aeb4a963b1e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 301 + ], + "type": "text", + "content": "Table 2: Training hyperparameters used for all experiments. Batch size and context length vary based on model type." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 319, + 234, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 319, + 234, + 333 + ], + "spans": [ + { + "bbox": [ + 105, + 319, + 234, + 333 + ], + "type": "text", + "content": "D FLOP computation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 338, + 506, + 382 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 338, + 506, + 382 + ], + "spans": [ + { + "bbox": [ + 104, + 338, + 506, + 382 + ], + "type": "text", + "content": "To compare backtracking and direct solution models under a fixed compute budget, we estimate inference FLOPs based on model architecture and generation length " + }, + { + "bbox": [ + 104, + 338, + 506, + 382 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 338, + 506, + 382 + ], + "type": "text", + "content": ". We use a simplified transformer FLOP computation that accounts for per-token operations across all layers." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 391, + 362, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 391, + 362, + 404 + ], + "spans": [ + { + "bbox": [ + 105, + 391, + 362, + 404 + ], + "type": "text", + "content": "Below is a list of architectural and generation parameters:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 108, + 412, + 400, + 491 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 108, + 412, + 235, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 412, + 235, + 424 + ], + "spans": [ + { + "bbox": [ + 108, + 412, + 235, + 424 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 108, + 412, + 235, + 424 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{model}}" + }, + { + "bbox": [ + 108, + 412, + 235, + 424 + ], + "type": "text", + "content": ": hidden dimension" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 108, + 426, + 242, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 426, + 242, + 438 + ], + "spans": [ + { + "bbox": [ + 108, + 426, + 242, + 438 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{kv}}" + }, + { + "bbox": [ + 108, + 426, + 242, + 438 + ], + "type": "text", + "content": " : key/value dimension" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 108, + 439, + 306, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 439, + 306, + 452 + ], + "spans": [ + { + "bbox": [ + 108, + 439, + 306, + 452 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 108, + 439, + 306, + 452 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{ff}}" + }, + { + "bbox": [ + 108, + 439, + 306, + 452 + ], + "type": "text", + "content": ": intermediate (feedforward) dimension" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 108, + 453, + 208, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 
108, + 453, + 208, + 464 + ], + "spans": [ + { + "bbox": [ + 108, + 453, + 208, + 464 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 108, + 453, + 208, + 464 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 108, + 453, + 208, + 464 + ], + "type": "text", + "content": " : number of layers" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 108, + 465, + 347, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 465, + 347, + 478 + ], + "spans": [ + { + "bbox": [ + 108, + 465, + 347, + 478 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 108, + 465, + 347, + 478 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 108, + 465, + 347, + 478 + ], + "type": "text", + "content": ": number of generated tokens (i.e., context length)" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 479, + 400, + 491 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 479, + 400, + 491 + ], + "spans": [ + { + "bbox": [ + 108, + 479, + 400, + 491 + ], + "type": "text", + "content": "- " + }, + { + "bbox": [ + 108, + 479, + 400, + 491 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 108, + 479, + 400, + 491 + ], + "type": "text", + "content": ": number of sequences generated (e.g., in best-of-" + }, + { + "bbox": [ + 108, + 479, + 400, + 491 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 108, + 479, + 400, + 491 + ], + "type": "text", + "content": " sampling)" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 497, + 280, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 497, + 280, + 510 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 280, + 510 + ], + "type": "text", + "content": "D.1 Step-by-step FLOPs Calculation" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 512, + 504, + 536 + ], + "type": "text", + "angle": 
0, + "lines": [ + { + "bbox": [ + 104, + 512, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 512, + 504, + 536 + ], + "type": "text", + "content": "1. Per-layer linear FLOPs per token. We break down the linear FLOPs for each transformer layer into attention and MLP components:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 108, + 544, + 186, + 555 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 544, + 186, + 555 + ], + "spans": [ + { + "bbox": [ + 108, + 544, + 186, + 555 + ], + "type": "text", + "content": "Self-attention:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 128, + 555, + 290, + 601 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 128, + 555, + 287, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 555, + 287, + 568 + ], + "spans": [ + { + "bbox": [ + 128, + 555, + 287, + 568 + ], + "type": "text", + "content": "- Query projection: " + }, + { + "bbox": [ + 128, + 555, + 287, + 568 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{model}} \\times d_{\\mathrm{model}}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 129, + 568, + 264, + 579 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 568, + 264, + 579 + ], + "spans": [ + { + "bbox": [ + 129, + 568, + 264, + 579 + ], + "type": "text", + "content": "- Key projection: " + }, + { + "bbox": [ + 129, + 568, + 264, + 579 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{model}} \\times d_{\\mathrm{kV}}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 129, + 579, + 271, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 579, + 271, + 590 + ], + "spans": [ + { + "bbox": [ + 129, + 579, + 271, + 590 + ], + "type": "text", + "content": "- Value projection: " + }, + { + "bbox": [ + 129, + 579, + 271, + 590 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{model}} \\times d_{\\mathrm{kv}}" + } + ] + } 
+ ], + "index": 18 + }, + { + "bbox": [ + 129, + 590, + 290, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 590, + 290, + 601 + ], + "spans": [ + { + "bbox": [ + 129, + 590, + 290, + 601 + ], + "type": "text", + "content": "- Output projection: " + }, + { + "bbox": [ + 129, + 590, + 290, + 601 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{model}} \\times d_{\\mathrm{model}}" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 118, + 601, + 227, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 601, + 227, + 610 + ], + "spans": [ + { + "bbox": [ + 118, + 601, + 227, + 610 + ], + "type": "text", + "content": "This results in a total of:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 216, + 613, + 405, + 628 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 613, + 405, + 628 + ], + "spans": [ + { + "bbox": [ + 216, + 613, + 405, + 628 + ], + "type": "interline_equation", + "content": "\\mathrm {F L O P s} _ {\\text {a t t e n t i o n - l i n e a r}} = 2 d _ {\\text {m o d e l}} ^ {2} + 2 d _ {\\text {m o d e l}} d _ {\\mathrm {k v}}", + "image_path": "574ecd7a6e5556ba5ba626aeade30078f52b3ad8e6a2dee475b386af00289639.jpg" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 108, + 630, + 213, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 630, + 213, + 640 + ], + "spans": [ + { + "bbox": [ + 108, + 630, + 213, + 640 + ], + "type": "text", + "content": "MLP (Feedforward):" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 118, + 642, + 310, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 642, + 310, + 653 + ], + "spans": [ + { + "bbox": [ + 118, + 642, + 310, + 653 + ], + "type": "text", + "content": "MLP layers include following components:" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 121, + 653, + 206, + 689 + ], + "type": "list", + 
"angle": 0, + "index": 28, + "blocks": [ + { + "bbox": [ + 121, + 653, + 200, + 664 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 653, + 200, + 664 + ], + "spans": [ + { + "bbox": [ + 121, + 653, + 200, + 664 + ], + "type": "text", + "content": "- Gate projection" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 121, + 666, + 193, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 666, + 193, + 677 + ], + "spans": [ + { + "bbox": [ + 121, + 666, + 193, + 677 + ], + "type": "text", + "content": "- Up projection" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 121, + 678, + 206, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 678, + 206, + 689 + ], + "spans": [ + { + "bbox": [ + 121, + 678, + 206, + 689 + ], + "type": "text", + "content": "- Down projection" + } + ] + } + ], + "index": 27 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 118, + 689, + 347, + 701 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 689, + 347, + 701 + ], + "spans": [ + { + "bbox": [ + 118, + 689, + 347, + 701 + ], + "type": "text", + "content": "Each of these MLP layers costs: " + }, + { + "bbox": [ + 118, + 689, + 347, + 701 + ], + "type": "inline_equation", + "content": "d_{\\mathrm{model}} \\times d_{\\mathrm{ff}}" + }, + { + "bbox": [ + 118, + 689, + 347, + 701 + ], + "type": "text", + "content": ", giving:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 260, + 704, + 363, + 718 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 704, + 363, + 718 + ], + "spans": [ + { + "bbox": [ + 260, + 704, + 363, + 718 + ], + "type": "interline_equation", + "content": "\\mathrm {F L O P s} _ {\\mathrm {m l p}} = 3 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {f f}}", + "image_path": "8130329fe5d7d8a2269feb52adbcbdbdac3a6312e097ab867dacdcd5c5d9099c.jpg" + } + ] + } + ], + "index": 30 + } + ], + "discarded_blocks": [ + { + 
"bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 25, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 719, + 498, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 719, + 498, + 733 + ], + "spans": [ + { + "bbox": [ + 116, + 719, + 498, + 733 + ], + "type": "inline_equation", + "content": "{}^{2}" + }, + { + "bbox": [ + 116, + 719, + 498, + 733 + ], + "type": "text", + "content": " key/value dimension is different from hidden dimension because of GQA (Ainslie et al., 2023)" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 32 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 425, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 425, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 425, + 95 + ], + "type": "text", + "content": "Combining both components, the total per-token linear cost per layer is:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 198, + 101, + 411, + 117 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 101, + 411, + 117 + ], + "spans": [ + { + "bbox": [ + 198, + 101, + 411, + 117 + ], + "type": "interline_equation", + "content": "\\mathrm {F L O P s} _ {\\mathrm {l i n e a r}} = 2 d _ {\\mathrm {m o d e l}} ^ {2} + 2 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {k v}} + 3 d _ {\\mathrm {m o d e l}} d _ {\\mathrm {f f}}", + "image_path": 
"5e892c3a71230aa82910617527ddd4696a13bfb4936334f3110ec7c974dc70b3.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 128, + 504, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 128, + 504, + 152 + ], + "spans": [ + { + "bbox": [ + 104, + 128, + 504, + 152 + ], + "type": "text", + "content": "2. Quadratic attention cost. Self-attention involves computing interactions between all token pairs, resulting in a quadratic cost:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 227, + 157, + 381, + 182 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 227, + 157, + 381, + 182 + ], + "spans": [ + { + "bbox": [ + 227, + 157, + 381, + 182 + ], + "type": "interline_equation", + "content": "\\mathrm {F L O P s} _ {\\text {a t t e n t i o n}} = d _ {\\text {m o d e l}} \\cdot \\frac {T (T + 1)}{2}", + "image_path": "80d09d2845a5fe640d41cafb42f29595802eafe4a00ac34ac72dcec1f68b6798.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "type": "text", + "content": "3. Total generation cost per sequence. Each token attends to all previous tokens across all " + }, + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 194, + 504, + 217 + ], + "type": "text", + "content": " layers. 
The generation cost for a single sequence is:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 195, + 223, + 414, + 237 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 195, + 223, + 414, + 237 + ], + "spans": [ + { + "bbox": [ + 195, + 223, + 414, + 237 + ], + "type": "interline_equation", + "content": "\\mathrm {F L O P s} _ {\\mathrm {g e n}} = L \\cdot \\left(\\mathrm {F L O P s} _ {\\mathrm {l i n e a r}} \\cdot T + \\mathrm {F L O P s} _ {\\mathrm {a t t e n t i o n}}\\right)", + "image_path": "45c3d566b5fe3c88b3258233a8b99a35829544e5efacedbb18a9463cabd9d636.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 248, + 504, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 248, + 504, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 248, + 504, + 270 + ], + "type": "text", + "content": "4. Total inference FLOPs. For " + }, + { + "bbox": [ + 104, + 248, + 504, + 270 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 248, + 504, + 270 + ], + "type": "text", + "content": " sequences (e.g., best-of- " + }, + { + "bbox": [ + 104, + 248, + 504, + 270 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 248, + 504, + 270 + ], + "type": "text", + "content": " sampling), the total inference cost is:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 244, + 270, + 366, + 285 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 270, + 366, + 285 + ], + "spans": [ + { + "bbox": [ + 244, + 270, + 366, + 285 + ], + "type": "interline_equation", + "content": "\\mathrm {F L O P s} _ {\\text {t o t a l}} = N \\cdot \\mathrm {F L O P s} _ {\\text {g e n}}", + "image_path": "456bf56f639912bcebe5fe77df3d7949409e1d805af59d1b5cd9f8ffe0142eef.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 296, + 506, + 331 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 296, + 506, 
+ 331 + ], + "spans": [ + { + "bbox": [ + 104, + 296, + 506, + 331 + ], + "type": "text", + "content": "We do not include auxiliary operations such as token embedding and softmax, weight norm, as their contribution is negligible compared to the transformer layers. All FLOPs reported in our experiments use this formula, with model configurations listed in Table 1." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 340, + 304, + 354 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 340, + 304, + 354 + ], + "spans": [ + { + "bbox": [ + 105, + 340, + 304, + 354 + ], + "type": "text", + "content": "E Majority voting versus best-of-n" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 359, + 506, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 506, + 417 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 506, + 417 + ], + "type": "text", + "content": "In this work, we primarily use the best-of-" + }, + { + "bbox": [ + 104, + 359, + 506, + 417 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 359, + 506, + 417 + ], + "type": "text", + "content": " metric to evaluate the direct solution model. This metric is suitable for tasks where verifying the correctness of a solution is trivial, whereas solving the task itself is challenging. Many real-world problems, such as coding tasks and combinatorial optimization, fall into this category. Conversely, for problems where verification is difficult, metrics such as majority voting may be more appropriate." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 424, + 506, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 424, + 506, + 469 + ], + "spans": [ + { + "bbox": [ + 104, + 424, + 506, + 469 + ], + "type": "text", + "content": "To illustrate this point, we additionally evaluate the CountDown direct solution model using both metrics in Figure 8. 
For majority voting, we generate n solutions per test problem, select the most frequently occurring solution (breaking ties randomly), and evaluate its correctness." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "spans": [ + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "text", + "content": "We find that the majority-voting performance closely approximates the direct solution model's one-shot accuracy (i.e., best-of- " + }, + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "inline_equation", + "content": "n = 1" + }, + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "text", + "content": "). However, majority voting is less suitable for our task for several reasons. First, the CountDown game frequently has multiple correct solutions, so selecting the majority solution path can fail to detect cases where the model generates different but equally valid solutions. Second, while majority voting is appropriate in real-world LLM scenarios—such as mathematical reasoning—where distinct solution paths converge to the same final boxed answer, in our synthetic setting, where models are trained from scratch, majority voting essentially becomes a noisy proxy for greedy decoding (sampling at temperature " + }, + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "inline_equation", + "content": "T = 0" + }, + { + "bbox": [ + 104, + 478, + 506, + 590 + ], + "type": "text", + "content": "). Thus, we expect and observe majority voting accuracy to closely track pass@1 accuracy." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 597, + 506, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 506, + 622 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 506, + 622 + ], + "type": "text", + "content": "In summary, given the characteristics of our task and the controlled experimental setup, best-of-" + }, + { + "bbox": [ + 104, + 597, + 506, + 622 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 597, + 506, + 622 + ], + "type": "text", + "content": " remains a valid and preferred metric for evaluating direct solution models." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 631, + 343, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 631, + 343, + 646 + ], + "spans": [ + { + "bbox": [ + 104, + 631, + 343, + 646 + ], + "type": "text", + "content": "F Dependence on depth of the search tree" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 651, + 211, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 211, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 211, + 663 + ], + "type": "text", + "content": "F.1 Search tree depth" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "content": "Why do backtracking models perform well on Selenium but underperform on CountDown, even when both are trained on DFS search traces? We argue that task characteristics—particularly those beyond our control in real—world settings—play a key role in determining whether backtracking is test-time-compute-efficient. A major difference between the two games lies in the depth of their search trees (Figure 2). 
In hard Selenium puzzles, only 20 out of 81 cells are pre-filled, leaving 50–60 cells to solve. This results in deep search trees with" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 211, + 85, + 400, + 199 + ], + "blocks": [ + { + "bbox": [ + 211, + 85, + 400, + 199 + ], + "lines": [ + { + "bbox": [ + 211, + 85, + 400, + 199 + ], + "spans": [ + { + "bbox": [ + 211, + 85, + 400, + 199 + ], + "type": "image", + "image_path": "932547a1aae0442be0b8ed55b64d894efab339f8a1e422c141b2a42aeb51fdfc.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 213, + 506, + 270 + ], + "lines": [ + { + "bbox": [ + 104, + 213, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 506, + 270 + ], + "type": "text", + "content": "Figure 8: Majority voting versus best-of- " + }, + { + "bbox": [ + 104, + 213, + 506, + 270 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 213, + 506, + 270 + ], + "type": "text", + "content": " for CountDown direct solution model. For CountDown, verification is much easier than solving the problem. 
Therefore, best-of- " + }, + { + "bbox": [ + 104, + 213, + 506, + 270 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 104, + 213, + 506, + 270 + ], + "type": "text", + "content": " as a performance is justified. Additionally, we also examine majority voting performance. However, CountDown solutions are not unique, majority voting is not the most suitable way to measure model performances." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 139, + 282, + 299, + 430 + ], + "blocks": [ + { + "bbox": [ + 139, + 282, + 299, + 430 + ], + "lines": [ + { + "bbox": [ + 139, + 282, + 299, + 430 + ], + "spans": [ + { + "bbox": [ + 139, + 282, + 299, + 430 + ], + "type": "image", + "image_path": "c0806c8c2a5e96f1f7a0026e9c74be9031e96165b9ef739ed5759bc84f66787e.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 439, + 506, + 561 + ], + "lines": [ + { + "bbox": [ + 104, + 439, + 506, + 561 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 506, + 561 + ], + "type": "text", + "content": "Figure 9: The efficacy of backtracking depends on search tree depth. Left: We introduce a variation of the CountDown game—stacked CountDown—to increase the search tree depth. In the original CountDown game (solid bars), the direct solution model consistently outperforms the backtracking model, shown by a positive performance gap. In the stacked version (slanted bars), this gap is significantly reduced or even reversed, indicating that backtracking becomes more compute-efficient at greater depths. Right: We introduce a variation of Sudoku—easy Sudoku—where the initial board has more pre-filled cells to decrease the search tree depth. In the original setting, the 38M direct solution model (bottom, solid bars) underperforms the backtracking model. 
In the shallow Sudoku variant (slanted bars), the performance gap narrows across compute budgets. For the 17M models (top), the results are less conclusive." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 312, + 282, + 471, + 430 + ], + "blocks": [ + { + "bbox": [ + 312, + 282, + 471, + 430 + ], + "lines": [ + { + "bbox": [ + 312, + 282, + 471, + 430 + ], + "spans": [ + { + "bbox": [ + 312, + 282, + 471, + 430 + ], + "type": "image", + "image_path": "f6b5c72489b87d26c48b44d4f55f9ca147976d6fd1d61acc75e1b1ed0f2b685f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 584, + 504, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 584, + 504, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 584, + 504, + 641 + ], + "type": "text", + "content": "extensive trial-and-error, with many backtracking steps. In contrast, CountDown (in our setup) uses 4 candidate numbers, limiting the search tree depth to just 3. We hypothesize that backtracking models excels at tasks with deeper search trees, while shallow trees make parallel strategies (i.e., direct solution model) more effective. To test this, we design a variant of CountDown with increased search depth and a variant of Sudoku with reduced depth." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 650, + 230, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 650, + 230, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 650, + 230, + 662 + ], + "type": "text", + "content": "F.2 A deeper CountDown" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 506, + 733 + ], + "type": "text", + "content": "Set up To increase the search tree depth in CountDown, one might naively scale up the number of candidate numbers. However, this approach quickly leads to exponential growth in tree width: with 4 candidates, the tree contains 1,152 nodes; with 5 candidates, it grows to 46,080. To prevent the exponential growth in the number of search paths, we design a stacked CountDown variant that increases depth while controlling tree width. In this setup, the player is given 8 candidate numbers and a final target. 
The first 4 numbers must be used" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 506, + 150 + ], + "type": "text", + "content": "to reach the 5th number (\"a partial goal\"), and the remaining 4 numbers must then be used to reach the final target. This effectively stacks two CountDown problems, increasing depth without combinatorial explosion. We generate training data for both backtracking and no-backtracking models following the same procedure as in Section 3.1.2, with examples provided in Appendix H (Figure 16). We train a 17M as well as a 38M model until validation loss has converged, and test on 200 unseen problems." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 159, + 506, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 159, + 506, + 258 + ], + "spans": [ + { + "bbox": [ + 104, + 159, + 506, + 258 + ], + "type": "text", + "content": "Results In Figure 9 (left), we compare the performance gap between the direct solution model and the backtracking model, measured by the difference in test accuracy. 
In the original CountDown setting (solid bars), the direct solution model consistently outperforms the backtracking model across all test compute budgets. However, in the stacked CountDown variant (slanted bars), the performance gap narrows significantly—and in some cases, reverses. The sign reverse indicates the backtracking model now outperforms the direct solution model. These results support our hypothesis: in CountDown, backtracking becomes more compute-efficient as the search tree depth increases. We observe this trend across both 17M and 38M models." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 270, + 224, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 224, + 281 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 224, + 281 + ], + "type": "text", + "content": "F.3 A shallower Selenium" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 286, + 506, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 286, + 506, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 286, + 506, + 365 + ], + "type": "text", + "content": "Set up To reduce the search tree depth in Selenium, we generate easier boards by increasing the number of initially filled cells. Specifically, we take the original 3M Selenium dataset Radcliffe (2020) and apply the direct solution model (Section 3.2.2) to correctly fill 10 additional cells. This increases the average number of pre-filled cells from 20 to around 30, effectively decreasing search tree depth. We generate both backtracking and direct solution training data following the same procedure in Section 3.2.2. Models with 17M and 38M parameters are trained to convergence and evaluated on 200 unseen problems." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 374, + 506, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 506, + 464 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 506, + 464 + ], + "type": "text", + "content": "Results In Figure 9 (right), we show the performance gap between the direct solution and backtracking models, measured by the difference in test accuracy. In the original (hard) Sudoku setting, the 38M direct solution model consistently underperforms the backtracking model, as indicated by the negative gaps (solid green bars). In the shallow-Sudoku variant (slanted bars), these gaps are reduced across all test-time compute budgets for the 38M model. The trend is less clear for the 17M model, where the performance difference remains small in both settings. Overall, these results support our hypothesis: in Sudoku, backtracking becomes more test-time-compute-efficient when the search tree is deeper." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 477, + 230, + 490 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 477, + 230, + 490 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 230, + 490 + ], + "type": "text", + "content": "G Additional results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 498, + 326, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 498, + 326, + 511 + ], + "spans": [ + { + "bbox": [ + 105, + 498, + 326, + 511 + ], + "type": "text", + "content": "G.1 Exploring different CountDown strategies" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 515, + 506, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 515, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 515, + 506, + 594 + ], + "type": "text", + "content": "We analyze different search strategies for CountDown, including DFS and BFS with varying beam widths. 
For each strategy, we tokenize the resulting backtracking trace and measure number of tokens used in each search trace. The goal is to identify which strategy that finds correct solutions with the fewest tokens (Figure 10). The results show no clear winner. BFS with a smaller beam width produces shorter traces by exploring fewer nodes, but this comes at the cost of missing correct solutions more frequently. Increasing the beam width improves solution coverage but leads to longer traces due to broader exploration." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 604, + 505, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 604, + 505, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 604, + 505, + 650 + ], + "type": "text", + "content": "In contrast, DFS produces more uniformly distributed trace lengths but suffers from a specific failure mode: it may prune the correct path early and terminate prematurely. These failures appear as short but incorrect traces, visible as the left-most orange bars in Figure 10 (bottom)." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 659, + 317, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 659, + 317, + 672 + ], + "spans": [ + { + "bbox": [ + 105, + 659, + 317, + 672 + ], + "type": "text", + "content": "G.2 Compare think-backtrack and backtrack" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 506, + 734 + ], + "type": "text", + "content": "Table 3 further shows a confusion matrix comparing the original and think-backtrack models. The backtracking model solves 102 test problems in total with maximum test-time compute budget (4096 tokens). Out of those 102 problems, the think-backtrack model solves most of them. 
This evidence further shows that by training on shortened search traces, the model learns to internalize parts of its thinking without sacrificing performances." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 167, + 81, + 443, + 316 + ], + "blocks": [ + { + "bbox": [ + 167, + 81, + 443, + 316 + ], + "lines": [ + { + "bbox": [ + 167, + 81, + 443, + 316 + ], + "spans": [ + { + "bbox": [ + 167, + 81, + 443, + 316 + ], + "type": "image", + "image_path": "fc56edd9b4072e1423ef98a3d12b83f40442c91276c3388b7c61b2accbcc2e7f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "lines": [ + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 104, + 327, + 506, + 372 + ], + "type": "text", + "content": "Figure 10: Examine different search strategies for CountDown. Beyond DFS, we experiment with Bread-First-Search (BFS) with different beam widths. We tokenize the search trace and measure the number of tokens as search length. There is not one search algorithm that is optimal to generate both short and correct solution traces." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 205, + 381, + 405, + 430 + ], + "blocks": [ + { + "bbox": [ + 205, + 381, + 405, + 430 + ], + "lines": [ + { + "bbox": [ + 205, + 381, + 405, + 430 + ], + "spans": [ + { + "bbox": [ + 205, + 381, + 405, + 430 + ], + "type": "table", + "html": "
T-B CorrectT-B Incorrect
B Correct8319
B Incorrect4157
", + "image_path": "a5baf5c34e74588f14d8ecbd99a21e83bd733de70d8b276db19b5acc6c40b6b3.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 117, + 437, + 490, + 449 + ], + "lines": [ + { + "bbox": [ + 117, + 437, + 490, + 449 + ], + "spans": [ + { + "bbox": [ + 117, + 437, + 490, + 449 + ], + "type": "text", + "content": "Table 3: Confusion matrix between Think-Backtrack (T-B) and Backtrack (B) models." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 471, + 293, + 483 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 293, + 483 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 293, + 483 + ], + "type": "text", + "content": "G.3 Supervised learning training curve" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 485, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 485, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 485, + 504, + 529 + ], + "type": "text", + "content": "During training, we set the maximum epochs to 30 epochs and allow early stopping. All models converge before 30 epochs and we early stop training when the validation loss has converged on log-log scale. Figure 11, 12 show the training curve for both models and for CountDown and Selenium." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 537, + 243, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 537, + 243, + 550 + ], + "spans": [ + { + "bbox": [ + 104, + 537, + 243, + 550 + ], + "type": "text", + "content": "G.4 Additional GRPO plots" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 552, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 552, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 552, + 506, + 641 + ], + "type": "text", + "content": "In Figure 3 (Section 4.2.1), we used the number of mistakes as a proxy for comparing search strategies. To further demonstrate that the backtracking model fine-tuned with GRPO discovers new strategies, we repeat the same analysis in Figure 13 (right). Compared to the original backtracking model (Figure 13, left), the post-GRPO model solves many problems with a different number of mistakes than the number of mistakes made by DFS. This shift indicates that the model is no longer tightly aligned with the original search trace and has discovered alternative, more diverse solution paths. Figure 6 (left) quantifies the above qualitative observation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 651, + 200, + 666 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 200, + 666 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 200, + 666 + ], + "type": "text", + "content": "H Data sample" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 670, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 505, + 704 + ], + "type": "text", + "content": "Figure 14 shows an example of a CountDown game and the training data. Figure 15 shows an example of a Sudoku game and the training data. Figure 16 shows an example of stacked-CountDown variation and the training data." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 105, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 166, + 131, + 309, + 242 + ], + "blocks": [ + { + "bbox": [ + 166, + 131, + 309, + 242 + ], + "lines": [ + { + "bbox": [ + 166, + 131, + 309, + 242 + ], + "spans": [ + { + "bbox": [ + 166, + 131, + 309, + 242 + ], + "type": "image", + "image_path": "70e27d83371e2162ae2ad3312e8604d72f509e110ba9bf9f581d4ee15f31c6b6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 132, + 445, + 242 + ], + "blocks": [ + { + "bbox": [ + 309, + 132, + 445, + 242 + ], + "lines": [ + { + "bbox": [ + 309, + 132, + 445, + 242 + ], + "spans": [ + { + "bbox": [ + 309, + 132, + 445, + 242 + ], + "type": "image", + "image_path": "a7f0cb936326cfeac7c67d4002567f49d6d2af471738d349c7d0ed30b32045bb.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 166, + 275, + 306, + 385 + ], + "blocks": [ + { + "bbox": [ + 104, + 250, + 504, + 273 + ], + "lines": [ + { + "bbox": [ + 104, + 250, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 104, + 250, + 504, + 273 + ], + "type": "text", + "content": "Figure 11: CountDown validation loss. Left: Backtracking model. 
Right: Direct solution model." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 166, + 275, + 306, + 385 + ], + "lines": [ + { + "bbox": [ + 166, + 275, + 306, + 385 + ], + "spans": [ + { + "bbox": [ + 166, + 275, + 306, + 385 + ], + "type": "image", + "image_path": "8eb41fd414170e86a8c76ead06fd22df26d60ebd1999e2cf7e9f99d3edb8492a.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 309, + 275, + 444, + 385 + ], + "blocks": [ + { + "bbox": [ + 309, + 275, + 444, + 385 + ], + "lines": [ + { + "bbox": [ + 309, + 275, + 444, + 385 + ], + "spans": [ + { + "bbox": [ + 309, + 275, + 444, + 385 + ], + "type": "image", + "image_path": "ae64856c00c49718b08575467901ac63bde0a1130a794f68a3fc2292876c436c.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 105, + 394, + 504, + 407 + ], + "lines": [ + { + "bbox": [ + 105, + 394, + 504, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 394, + 504, + 407 + ], + "type": "text", + "content": "Figure 12: Sodomu validation loss. Left: Backtracking model. Right: Direct solution model." 
+ } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 146, + 513, + 302, + 635 + ], + "blocks": [ + { + "bbox": [ + 146, + 513, + 302, + 635 + ], + "lines": [ + { + "bbox": [ + 146, + 513, + 302, + 635 + ], + "spans": [ + { + "bbox": [ + 146, + 513, + 302, + 635 + ], + "type": "image", + "image_path": "926acdc15b5f62a9030e2b341cc566625d663dc62c206a1cfe0ddd4f77598eea.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 644, + 506, + 680 + ], + "lines": [ + { + "bbox": [ + 104, + 644, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 506, + 680 + ], + "type": "text", + "content": "Figure 13: Backtracking model can discover some new search strategies. Post GRPO, the backtracking model discover new strategies: In the right panel, for each problem, the post-GRPO model makes a different number of mistakes compared to DFS trace." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 513, + 463, + 635 + ], + "blocks": [ + { + "bbox": [ + 307, + 513, + 463, + 635 + ], + "lines": [ + { + "bbox": [ + 307, + 513, + 463, + 635 + ], + "spans": [ + { + "bbox": [ + 307, + 513, + 463, + 635 + ], + "type": "image", + "image_path": "e36fe3888388f8faacc648f6647e91131e8f15d326a7c7ed9d7a8e014287a021.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 
311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 107, + 268, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 107, + 268, + 121 + ], + "spans": [ + { + "bbox": [ + 121, + 107, + 268, + 121 + ], + "type": "text", + "content": "Example of a CountDown game" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 120, + 129, + 342, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 129, + 342, + 141 + ], + "spans": [ + { + "bbox": [ + 120, + 129, + 342, + 141 + ], + "type": "text", + "content": "Target: 16 Candidates: [96, 11, 78, 22]" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 156, + 203, + 166 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 156, + 203, + 166 + ], + "spans": [ + { + "bbox": [ + 121, + 156, + 203, + 166 + ], + "type": "text", + "content": "DFS Search Trace" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 138, + 175, + 428, + 523 + ], + "blocks": [ + { + "bbox": [ + 138, + 175, + 428, + 523 + ], + "lines": [ + { + "bbox": [ + 138, + 175, + 428, + 523 + ], + "spans": [ + { + "bbox": [ + 138, + 175, + 428, + 523 + ], + "type": "text", + "content": "Current State: 16:[96, 11, 78, 22], Operations: [ ] \nExploring Operation: 96-78=18, Resulting Numbers: [11, 22, 18] \nGenerated Node #0,0: 16:[11, 22, 18] Operation: 96-78=18 \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 22-11=11, Resulting Numbers: [18, 11] \nGenerated Node #0,0,0: 16:[18, 11] Operation: 22-11=11 \nMoving to Node #0,0,0 \nCurrent State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] \nExploring Operation: 18-11=7, Resulting Numbers: [7] \n7,16 unequal: No Solution \nMoving to Node #0,0,0 \nCurrent 
State: 16:[18, 11], Operations: ['96-78=18', '22-11=11'] \nExploring Operation: 18+11=29, Resulting Numbers: [29] \n29,16 unequal: No Solution \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 18-11=7, Resulting Numbers: [22, 7] \nGenerated Node #0,0,1: 16:[22, 7] Operation: 18-11=7 \nMoving to Node #0,0,1 \nCurrent State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] \nExploring Operation: 22-7=15, Resulting Numbers: [15] \n15,16 unequal: No Solution \nMoving to Node #0,0,1 \nCurrent State: 16:[22, 7], Operations: ['96-78=18', '18-11=7'] \nExploring Operation: 22+7=29, Resulting Numbers: [29] \n29,16 unequal: No Solution \nMoving to Node #0,0 \nCurrent State: 16:[11, 22, 18], Operations: ['96-78=18'] \nExploring Operation: 22/11=2, Resulting Numbers: [18, 2] \nGenerated Node #0,0,2: 16:[18, 2] Operation: 22/11=2 \nMoving to Node #0,0,2 \nCurrent State: 16:[18, 2], Operations: ['96-78=18', '22/11=2'] \nExploring Operation: 18-2=16, Resulting Numbers: [16] \n16,16 equal: Goal Reached" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 121, + 539, + 220, + 550 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 539, + 220, + 550 + ], + "spans": [ + { + "bbox": [ + 121, + 539, + 220, + 550 + ], + "type": "text", + "content": "Direct Solution Trace" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 138, + 557, + 419, + 647 + ], + "blocks": [ + { + "bbox": [ + 138, + 557, + 419, + 647 + ], + "lines": [ + { + "bbox": [ + 138, + 557, + 419, + 647 + ], + "spans": [ + { + "bbox": [ + 138, + 557, + 419, + 647 + ], + "type": "text", + "content": "Current State: 16:[96, 11, 78, 22], Operations: [ ] \nExploring Operation: 96-78=18, Resulting Numbers: [11, 22, 18] \nGenerated Node #2: [11, 22, 18] from Operation: 96-78=18 \nCurrent State: 16:[11, 22, 18], Operations: 
['96-78=18'] \nExploring Operation: 22/11=2, Resulting Numbers: [18, 2] \nGenerated Node #3: [18, 2] from Operation: 22/11=2 \nCurrent State: 16:[18, 2], Operations: ['96-78=18', '22/11=2'] \nExploring Operation: 18-2=16, Resulting Numbers: [16] \n16,16 equal: Goal Reached" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 104, + 670, + 506, + 703 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 670, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 670, + 506, + 703 + ], + "type": "text", + "content": "Figure 14: Example of training data for CoutnDown. Top: game setup. Middle: full DFS search trace for training backtracking model. Bottom: correct solution path for training direct solution model." + } + ] + } + ], + "index": 7, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 149, + 152, + 302, + 295 + ], + "blocks": [ + { + "bbox": [ + 206, + 144, + 244, + 152 + ], + "lines": [ + { + "bbox": [ + 206, + 144, + 244, + 152 + ], + "spans": [ + { + "bbox": [ + 206, + 144, + 244, + 152 + ], + "type": "text", + "content": "Initial Board" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 149, + 152, + 302, + 
295 + ], + "lines": [ + { + "bbox": [ + 149, + 152, + 302, + 295 + ], + "spans": [ + { + "bbox": [ + 149, + 152, + 302, + 295 + ], + "type": "table", + "html": "
7245
17
839
51
297
32
84
4789
6213
", + "image_path": "7e900a1a3702d874176ede358ecb138cc0af4e0c40a03c1f9b5a345fc93aee4a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 308, + 152, + 462, + 295 + ], + "blocks": [ + { + "bbox": [ + 364, + 144, + 406, + 152 + ], + "lines": [ + { + "bbox": [ + 364, + 144, + 406, + 152 + ], + "spans": [ + { + "bbox": [ + 364, + 144, + 406, + 152 + ], + "type": "text", + "content": "Solved Board" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 308, + 152, + 462, + 295 + ], + "lines": [ + { + "bbox": [ + 308, + 152, + 462, + 295 + ], + "spans": [ + { + "bbox": [ + 308, + 152, + 462, + 295 + ], + "type": "table", + "html": "
712498635
964135278
583726194
876954312
239861457
451372986
328549761
147683529
695217843
", + "image_path": "b78f6358bb8f2f1fa9c15f4102fea440d9c5a986e0710809e9168c38b2874448.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 308, + 247, + 319 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 308, + 247, + 319 + ], + "spans": [ + { + "bbox": [ + 121, + 308, + 247, + 319 + ], + "type": "text", + "content": "Example of a Sudoku game" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "spans": [ + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "text", + "content": "START " + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(0, 0) = 7" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(0, 2) = 2" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(0, 3) = 4" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(0, 8) = 5" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(1, 3) = 1" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(1, 7) = 7" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(2, 1) = 8" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(2, 2) = 3" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(2, 7) = 9" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(3, 4) = 5" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(3, 7) = 1" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + 
"content": "(4, 0) = 2" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(4, 2) = 9" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(4, 8) = 7" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(5, 3) = 3" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(5, 5) = 2" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(6, 2) = 8" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(6, 4) = 4" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(7, 1) = 4" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(7, 2) = 7" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(7, 4) = 8" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(7, 8) = 9" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(8, 0) = 6" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(8, 3) = 2" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(8, 4) = 1" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "inline_equation", + "content": "(8, 8) = 3" + }, + { + "bbox": [ + 120, + 328, + 490, + 370 + ], + "type": "text", + "content": " solving" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 384, + 202, + 394 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 384, + 202, + 394 + ], + "spans": [ + { + "bbox": [ + 121, + 384, + 202, + 394 + ], + "type": "text", + "content": "DFS Search Trace" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 402, + 490, + 540 + 
], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 402, + 490, + 540 + ], + "spans": [ + { + "bbox": [ + 120, + 402, + 490, + 540 + ], + "type": "text", + "content": "SOL_START \\((4, 4) = 6\\) \\((8, 2) = 5\\) \\((4, 3) = 8\\) \\((8, 1) = 9\\) \\((8, 5) = 7\\) GUESS: \\((0, 1)\\) \\([1, 6] = 1\\) \\((0, 1) = 1\\) GUESS: \\((0, 4)\\) \\([3, 9] = 3\\) \\((0, 4) = 3\\) GUESS: \\((0, 6)\\) \\([6, 8] = 6\\) \\((0, 6) = 6\\) \\((0, 7) = 8\\) \\((8, 7) = 4\\) \\((0, 5) = 9\\) \\((1, 4) = 2\\) \\((1, 8) = 4\\) \\((2, 4) = 7\\) \\((3, 5) = 1\\) \\((4, 5) = 9\\) \\((8, 6) = 8\\) \\((1, 2) = 6\\) \\((1, 6) = 3\\) \\((3, 3) = 7\\) \\((1, 1) = 5\\) \\((1, 5) = 8\\) \\((2, 0) = 4\\) \\((4, 1) = 3\\) \\((4, 7) = 5\\) \\((5, 6) = 4\\) \\((5, 7) = 6\\) \\((5, 8) = 8\\) \\((6, 1) = 2\\) \\((7, 7) = 2\\) \\((1, 0) = 9\\) \\((3, 0) = 8\\) \\((3, 1) = 6\\) \\((3, 8) = 2\\) \\((5, 1) = 7\\) \\((5, 2) = 1\\) \\((2, 8) = 1\\) \\((3, 6) = 9\\) \\((5, 0) = 5\\) \\((6, 8) = 6\\) \\((2, 6) = 2\\) NO_CANDIDATE: \\((3, 2)\\) NO CANDIDATE: \\((0, 6)\\) REVERT: \\((0, 6)\\) \\([6, 8] =\\) NONE GUESS: \\((0, 6)\\) \\([6, 8] = 8\\) \\((0, 6) = 8\\) \\((0, 7) = 6\\) \\((8, 6) = 4\\) \\((8, 7) = 8\\) \\((0, 5) = 9\\) \\((1, 4) = 2\\) \\((1, 6) = 3\\) \\((1, 8) = 4\\) \\((2, 4) = 7\\) \\((3, 5) =\\) \\(4\\) \\((4, 5) = 1\\) \\((4, 6) = 5\\) \\((5, 4) = 9\\) \\((5, 6) = 6\\) \\((5, 7) = 4\\) \\((5, 8) = 8\\) \\((1, 2) = 6\\) \\((3, 3) = 7\\) \\((3, 8) = 2\\) \\((4,\\) \\(1) = 3\\) \\((5,2) = 1\\) \\((6,1) = 2\\) \\((6,7) = -2\\) \\((1,1) = -5\\) \\((1,5) = -8\\) \\((2,0) = -4\\) \\((2,8) = -1\\) \\((3,0)=\\( \\(8(3,1) = -6(3,6) = -7(7,0)=3(6,0)=1\\text{NO\\_CANDIDATE}:(3,2)\\text{NO\\_CANDIDATE}:(0,6)\\text{revert}:(0,6)[6,(8)]=\\)\\(\\text{NO\\_CANDIDATE NO\\_CANDIDATE}:(0,4)\\text{REVERT}:(0,4)[3,9]=\\text{NONE 
GUESS}:(0,4)[3,9]=9(0,4)=9(5,4)=7(2,4)=2(3,3)=9(3,5)=4(4,5)=1(1,4)=3(3,2)=6(5,1)=5(1,1)=6(1,2)=4(2,\\)\\)\\(\\begin{array}{r}\\emptyset=5(2,5)=6(4,1)=3(5,2)=1(6,1)=2(0,5)=8(1,\\emptyset)=9(1,5)=5(2,3)=7(3,\\emptyset)=8\\\\(3,\\text{1})=7(3,\\text{8})=2(5,\\emptyset)=4(7,\\text{5})=3(1,\\text{8})=8(3,\\text{6})=3(5,\\text{8})=6(6,\\text{5})=9(6,\\text{8})=1(7,\\emptyset)=\\\\1(0,\\emptyset)=6(0,\\emptyset)=3(1,\\emptyset)=2(2,\\emptyset)=4(5,\\emptyset)=8(6,\\emptyset)=3(7,\\emptyset)=5(8,\\emptyset)=4(2,\\emptyset)=1(4,\\emptyset)\\end{array}\\) \\(= (3,1) = -7(3,\\text{8}) = -6(4,\\text{7}) = -3(1,\\text{6}) = -2(2,\\text{8}) = -4(5,\\text{7}) = -8(6,\\text{0}) = -3(7,\\text{7}) = -2(8,\\text{6}) = -8(6,\\text{3}) = -5\\mathrm{SOL}\\quad\\mathrm{END}\\)" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 555, + 197, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 555, + 197, + 565 + ], + "spans": [ + { + "bbox": [ + 121, + 555, + 197, + 565 + ], + "type": "text", + "content": "Correct Solution" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 120, + 572, + 490, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 572, + 490, + 623 + ], + "spans": [ + { + "bbox": [ + 120, + 572, + 490, + 623 + ], + "type": "inline_equation", + "content": "\\begin{array}{l}\\mathrm{SOL\\_START(4,4) = 6(8,2) = 5(4,3) = 8(8,1) = 9(8,5) = 7(0,1) = 1(0,4) = 9(5,4) = 7(2,4) =}\\\\ \\mathrm{2(3,3) = 9(3,5) = 4(4,5) = 1(1,4) = 3(3,2) = 6(5,1) = 5(1,1) = 6(1,2) = 4(2,0) = 5(2,5)}\\\\ \\mathrm{= 6(4,1) = 3(5,2) = 1(6,1) = 2(0,5) = 8(1,0) = 9(1,5) = 5(2,3) = 7(3,0) = 8(3,1) = 7(3,}\\\\ \\mathrm{8) = 2(5,0) = 4(7,5) = 3(1,8) = 8(3,6) = 3(5,8) = 6(6,5) = 9(6,8) = 1(7,0) = 1(0,6) = 6}\\\\ (\\emptyset ,7) = 3(1,6) = 2(2,8) = 4(5,7) = 8(6,\\emptyset) = 3(7,6) = 5(8,7) = 4(2,6) = 1(4,6) = 4(4,7) = \\\\ \\mathrm{5(5,6)} = 9(6,6) = 7(6,7) = 6(7,3) = 6(7,7) = 2(8,6) = 8(6,3) = 5\\mathrm{SOL\\_END} \\end{array}" + } + ] + } + ], 
+ "index": 10 + }, + { + "bbox": [ + 104, + 644, + 506, + 667 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 644, + 506, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 644, + 506, + 667 + ], + "type": "text", + "content": "Figure 15: Example of training data for Sudoku game. Top: initial puzzle setup. Middle: full search trace with guesses and backtracking (tabs used). Bottom: final correct solution." + } + ] + } + ], + "index": 11, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 83, + 306, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 83, + 306, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 306, + 95 + ], + "type": "text", + "content": "Example of a stacked-CountDown game" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 103, + 421, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 103, + 421, + 116 + ], + "spans": [ + { + "bbox": [ + 121, + 103, + 421, + 116 + ], + "type": "text", + "content": "Target: 96 Candidates: [22, 77, 24, 48, 31, 12, 36, 35]" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 130, + 202, + 141 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 130, + 202, + 141 + ], + "spans": [ + { + "bbox": [ + 121, + 130, + 202, + 141 
+ ], + "type": "text", + "content": "DFS Search Trace" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 106, + 148, + 504, + 469 + ], + "blocks": [ + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "lines": [ + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "spans": [ + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": "Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "77 - 48 = 29" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers: [22, 24, 29, 31, 12, 36, 35] Generated Node #0,0:96:[22, 24, 29, 31, 12, 36, 35]Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "77 - 48 = 29" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Moving to Node " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Current State:96:[22, 24, 29, 31, 12, 36, 35],Operations:['77-48=29'] Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "22 + 24 = 46" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers: [29, 46, 31, 12, 36, 35] Generated Node " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " :96:[29,46,31,12,36,35]Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "22 + 24 = 46" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Moving to Node " + }, + { + "bbox": [ + 106, + 
148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Current State:96:[29,46,31,12,36,35],Operations:'77-48=29', '22+24=46' ] Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "46 - 29 = 17" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers: [17,31,12,36,35] 17,31 unequal Moving to Node " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ... . . . Current State:96:[29,2,31,12,36,35],Operations:'77-48=29', '24-22=2' ] Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "29 + 2 = 31" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers: [31,31,12,36,35] 31,31 equal Current State:96:[31,12,36,35],Operations:[] Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "36 - 35 = 1" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers: [31,12,1] Generated Node " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " :96:[31,12,1]Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "36 - 35 = 1" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Moving to Node " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Current 
State:96:[31,12,1],Operations:'36-35=1' Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "31 + 1 = 32" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers: [12,32] Generated Node " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " :96:[12,32]Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "31 + 1 = 32" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Moving to Node " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "\\# 0,0,0" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " Current State:96:[12,32],Operations:' " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "36 - 35 = 1" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,' " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "31 + 1 = 32" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ' Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "12 + 32 = 44" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers: [44] 44.96 unequal:No Solution ... 
Exploring Operation: " + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "inline_equation", + "content": "4*24 = 96" + }, + { + "bbox": [ + 106, + 148, + 504, + 469 + ], + "type": "text", + "content": " ,Resulting Numbers:[96]" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "yaml" + }, + { + "bbox": [ + 121, + 483, + 219, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 483, + 219, + 495 + ], + "spans": [ + { + "bbox": [ + 121, + 483, + 219, + 495 + ], + "type": "text", + "content": "Direct Solution Trace" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 106, + 501, + 501, + 684 + ], + "blocks": [ + { + "bbox": [ + 106, + 501, + 501, + 684 + ], + "lines": [ + { + "bbox": [ + 106, + 501, + 501, + 684 + ], + "spans": [ + { + "bbox": [ + 106, + 501, + 501, + 684 + ], + "type": "text", + "content": "Current State: 96:[22, 77, 24, 48, 31, 12, 36, 35], Operations: [] Exploring Operation: 77-22=55, Resulting Numbers: [55, 24, 48, 31, 12, 36, 35] Generated Node #2: [55, 24, 48, 31, 12, 36, 35] from Operation: 77-22=55 Current State: 96:[55, 24, 48, 31, 12, 36, 35], Operations: ['77-22=55'] Exploring Operation: 48-24=24, Resulting Numbers: [55, 24, 31, 12, 36, 35] Generated Node #3: [55, 24, 31, 12, 36, 35] from Operation: 48-24=24 Current State: 96:[55, 24, 31, 12, 36, 35], Operations: ['77-22=55', '48-24=24'] Exploring Operation: 55-24=31, Resulting Numbers: [31, 31, 12, 36, 35] 31,31 equal Current State: 96:[31, 12, 36, 35], Operations: [] Exploring Operation: 35-31=4, Resulting Numbers: [4, 12, 36] Generated Node #2: [4, 12, 36] from Operation: 35-31=4 Current State: 96:[4, 12, 36], Operations: ['35-31=4'] Exploring Operation: 36-12=24, Resulting Numbers: [24, 4] Generated Node #3: [24, 4] from Operation: 36-12=24 Current State: 96:[24, 4], Operations: ['35-31=4', '36-12=24'] Exploring Operation: " + }, + { + "bbox": [ + 
106, + 501, + 501, + 684 + ], + "type": "inline_equation", + "content": "4 \\times 24 = 96" + }, + { + "bbox": [ + 106, + 501, + 501, + 684 + ], + "type": "text", + "content": " ,Resulting Numbers:[96] 96,96 equal: Goal Reached" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 104, + 704, + 504, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 704, + 504, + 739 + ], + "spans": [ + { + "bbox": [ + 104, + 704, + 504, + 739 + ], + "type": "text", + "content": "Figure 16: Example of training data for stacked-CoutnDown (Appendix F). Top: game setup. Middle: full DFS search trace for training backtracking model. Bottom: correct solution path for training direct solution model." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "spans": [ + { + "bbox": [ + 106, + 26, + 317, + 38 + ], + "type": "text", + "content": "Published as a conference paper at COLM 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_content_list.json b/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..a3b98ded1ab3bf026bee270dbb89b4b3d3ec849f --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_content_list.json @@ -0,0 +1,2135 @@ +[ + { + "type": "text", + "text": "TASTE: Text-Aligned Speech Tokenization and Embedding for Spoken Language Modeling", + "text_level": 1, + "bbox": [ + 212, + 121, + 782, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Liang-Hsuan Tseng\\*23 Yi-Chang Chen\\*1 Kuan-Yi Lee\\*23 Da-Shan Shiu\\*1 Hung-yi Lee\\*3", + "bbox": [ + 189, + 223, + 810, + 242 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "*Equal contribution 1MediaTek Research", + "bbox": [ + 352, + 253, + 643, + 268 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{2}$ Internship at MediaTek Research $^{3}$ National Taiwan University", + "bbox": [ + 281, + 268, + 714, + 284 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{yi-chang.chen, ds.shiu}@mtkresearch.com", + "bbox": [ + 326, + 296, + 671, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{f11921067,b10901091,hungyilee}@ntu.edu.tw", + "bbox": [ + 308, + 311, + 687, + 324 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 359, + 537, + 376 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent efforts target spoken language models (SLMs) that not only listen but also speak for more natural human-LLM interaction. Joint speech-text modeling is a promising direction to achieve this. However, the effectiveness of recent speech tokens for joint modeling remains underexplored. To address this, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a method that directly addresses the modality gap by aligning speech token with the corresponding text transcription during the tokenization stage. We propose a method that can achieve this through a attention-based aggregation mechanism and with speech reconstruction as the training objective. 
We conduct extensive experiments and show that TASTE can preserve essential paralinguistic information while dramatically reducing the token sequence length. With TASTE, we perform straightforward joint spoken language modeling by using Low-Rank Adaptation on the pre-trained text LLM. Experimental results show that TASTE-based SLMs perform comparable to previous work on SALMON and StoryCloze; while significantly outperform other pre-trained SLMs on speech continuation across subjective and objective evaluations. To our knowledge, TASTE is the first end-to-end approach that utilizes a reconstruction objective to automatically learn a text-aligned speech tokenization and embedding suitable for spoken language modeling. Our demo, code, and model are available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io.", + "bbox": [ + 228, + 391, + 767, + 655 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 681, + 313, + 698 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Learning a speech representation suitable for universal speech-processing tasks has long been a significant challenge [31, 42, 48]. Unlike text—which can be encoded discretely [18, 19, 39]—speech is a continuous waveform carrying layered information (acoustic, semantic, prosodic, etc.). Recent neural self-supervised learning (SSL) methods move beyond filter-banks and MFCCs to encode raw audio into compact, high-dimensional embeddings that excel on discriminative tasks such as automatic speech recognition (ASR), emotion recognition, and speaker verification. 
Despite these gains, learning representations for generative speech tasks remains an open and more complex problem that has begun to attract focused attention [17, 28, 44, 45].", + "bbox": [ + 169, + 713, + 826, + 825 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Among the generative speech tasks, spoken language modeling (SLM) is an intriguing direction, aiming to create models that can not only listen but also speak. Typically, building an SLM requires two stages: first, deriving speech tokenizations; second, training a language model on the speech tokens. For the speech tokens, previous approaches either apply SSL-based representations following by discretization techniques [11, 14, 21, 32] or reuse units from neural codec models like EnCodec", + "bbox": [ + 169, + 830, + 826, + 902 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07053v2 [cs.CL] 22 May 2025", + "bbox": [ + 22, + 258, + 60, + 707 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint. Under review.", + "bbox": [ + 171, + 922, + 315, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/46c84b75462a0de2c6f9d19b33a64cae57a50c650d7f2f43f7c1867ad55d4557.jpg", + "image_caption": [ + "Figure 1: The concept overview. Conventional methods extract speech tokens solely from speech, inducing length-mismatch problem when conducting joint speech-text modeling. By taking dual modalities as input, we generate speech tokenization that is aligned with text, facilitating straightforward and effective joint modeling." + ], + "image_footnote": [], + "bbox": [ + 176, + 89, + 380, + 207 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/c6d5132251cd60884addea1cc932e94c730e578aeb4eca458f8ca4823797f9a7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 388, + 89, + 821, + 207 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "and SoundStream [5, 20, 41, 49]. 
Although autoregressive modeling with these speech tokens shows great potential in text-to-speech (TTS) [45, 47], previous SLMs that model only speech tokens [21, 32] have been shown to lack semantic fidelity [22].", + "bbox": [ + 169, + 311, + 823, + 354 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To bridge this gap, one promising direction is to utilize text—which is rich in semantic—during spoken language modeling. TWIST [11] shows that SLMs can benefit from initializing with text LLMs. More recent work often conducts joint speech-text modeling on tokens of both modalities to facilitate the semantic coherence on the generated speech [6, 9, 33, 46]. Yet integrating text and speech tokens introduces a length-mismatch challenge, as speech token sequences are usually longer than their text counterparts. Common remedies may include interleaving speech and text tokens [33] or inserting padding to synchronize sequence lengths between modalities [6, 9, 46]. However, these methods require either additional speech-text alignment or heuristic rules to enable joint modeling.", + "bbox": [ + 169, + 359, + 823, + 470 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a special type of speech tokenization tailored for speech-text joint spoken language modeling. By acknowledging that the length mismatch introduces additional complexity in joint modeling, we develop our speech token to be aligned with its corresponding text transcription tokens. To achieve this, we first obtain the textual transcription of a speech with the ASR model; then we derive the speech token based on the transcription through a specialized cross-attention mechanism for speech reconstruction. Note that the full process can be accomplished in an end-to-end manner, with no explicit speech-text alignment required. 
Unlike previous speech tokens that are developed under a fixed stride with fixed down-sampling rate, our speech token has dynamic frequency as it is text-aligned. Figure 1 shows an overall concept of TASTE, illustrating how our tokenization allows straightforward joint modeling.", + "bbox": [ + 169, + 477, + 826, + 617 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To evaluate the effectiveness of TASTE, we first conduct extensive experiments on speech reconstruction. Our results on LibriSpeech [34] show that TASTE not only resynthesizes speech in high quality, but also retains similarity to the original speech. TASTE achieves high-end reconstruction at an extremely low bit rate ( $\\sim 150$ bps); while the other comparable methods are often more than thousands of bps. More intriguingly, we demonstrate that TASTE allows simple text-aligned speech editing. By exchanging the partial text-aligned speech tokens from two different utterances with the same content, we demonstrate that the paralinguistic information such as duration and tone can be exchanged precisely following the words being exchanged, resulting in natural edited speech.", + "bbox": [ + 169, + 621, + 823, + 733 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "On the other hand, we demonstrate that TASTE successfully allows effective spoken language modeling. We perform straightforward joint modeling with TASTE under Low-Rank Adaptation [15]. We first perform speech continuation experiments with 3-second speech prompts given. The evaluation is three-fold. We use GPT-4o for evaluating the semantic aspect; UTMOS [38] for the acoustic aspect; and the human listening test for the general evaluation. Results show that our SLMs not only generate natural, meaningful speech continuations, but also outperforms the other 7B pre-trained SLMs across all the continuation evaluation aspects with 1.3B parameters. 
We also evaluate our SLMs on two benchmarks, SALMON [25] and StoryCloze [11] and our results show that our SLMs achieve comparable performance compared to the other speech-text joint modeling methods. Moreover, we show that our pretrained SLM can perform spoken question answering under few-shot scenario.", + "bbox": [ + 169, + 738, + 826, + 877 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, we derive TASTE, a text-aligned speech tokenization that allows effective joint speech-text spoken language modeling. By aligning the speech tokenization with its text counterpart during", + "bbox": [ + 169, + 883, + 826, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1ec9832577223e0e450fbaf505a535d7ac71357bf1e0ccf1924ef8ca6ebe0e86.jpg", + "image_caption": [ + "Figure 2: The overall framework of our text-aligned speech tokenization and embedding. The left side illustrate the process of obtaining the TASTE tokenization $\\hat{z}$ , detailed in Section 2.1.1; while the right side demonstrate how we reconstruct the speech with TASTE (Section 2.1.2). The training objective for our speech reconstruction is discussed in Section 2.1.3." + ], + "image_footnote": [], + "bbox": [ + 176, + 95, + 816, + 272 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the tokenization stage, TASTE enables straightforward modeling. To our best knowledge, we are the first one to utilize the reconstruction objective to automatically derive a text-aligned speech tokenization and embedding that is suitable for joint speech-text spoken language modeling. 
Our demo is available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io.", + "bbox": [ + 169, + 366, + 823, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Method", + "text_level": 1, + "bbox": [ + 171, + 441, + 272, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We propose text-aligned speech tokenization and embedding (TASTE) to facilitate effective joint speech-text spoken language modeling. Here, we first introduce how we derive our tokenization—TASTE—in Section 2.1, and then discuss how we use TASTE for spoken language modeling (§ 2.2).", + "bbox": [ + 169, + 472, + 826, + 516 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Building TASTE", + "text_level": 1, + "bbox": [ + 171, + 531, + 328, + 545 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "As depicted in Figure 2, TASTE is comprised of the two main components: the text-aligned speech tokenizer ( $\\S$ 2.1.1) that produces the text-aligned speech tokenization; and the speech decoder ( $\\S$ 2.1.2) to reconstruct speech based on the text token and the TASTE speech token aligned with it. The training objective of speech reconstruction is described in Section 2.1.3.", + "bbox": [ + 169, + 556, + 823, + 613 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1.1 TASTE Speech Tokenizer", + "text_level": 1, + "bbox": [ + 171, + 627, + 403, + 642 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In TASTE, the speech tokenizer, denoted as Tokenizer(\\cdot), is designed to generate the text-aligned speech tokenization and embedding with the speech-text pair $X = (u, v)$ taken as input, where $v$ represents the textual transcription of the speech utterance $u$ , which can be easily obtained through an automatic speech recognition (ASR) system. 
Recent developments in robust and efficient ASR ([10, 35]) allow us to focus on discussing how to derive the text-aligned speech token effectively by assuming that $v$ is of sufficient quality. The TASTE speech tokenizer is composed of three major components: an encoder, an aggregator, and a quantizer.", + "bbox": [ + 169, + 651, + 823, + 750 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The encoder Encoder $(\\cdot)$ contains $L$ layers of Transformer ([43]) encoder blocks and is used to extract high-dimensional speech representation. We employ the pre-trained Whisper ASR encoder [35] as our speech encoder, and it is frozen during training. For an input speech utterance $\\pmb{u}$ , the encoder produces a sequence of hidden states from each layer $[h^{(1)}, h^{(2)}, \\dots, h^{(L)}]$ . In our experiments, we retain the last hidden layer representation $h^{(L)}$ and the shallow representation $h^{(l)}$ from the first half of the hidden representations of the encoder for later usage, denoted as:", + "bbox": [ + 169, + 753, + 825, + 842 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)} = \\operatorname {E n c o d e r} (\\boldsymbol {u}), \\quad \\text {w h e r e} 1 \\leq l \\leq \\left\\lfloor \\frac {L}{2} \\right\\rfloor .\n$$\n", + "text_format": "latex", + "bbox": [ + 326, + 858, + 668, + 878 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Note that both of the hidden representations $\\pmb{h}^{(L)}, \\pmb{h}^{(l)} \\in \\mathbb{R}^{T \\times d_h}$ have their length denoted as $T$ and the hidden dimension indicated by $d_h$ .", + "bbox": [ + 169, + 882, + 823, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The hidden representations extracted from the encoder are then passed to the aggregator. 
The aggregator is designed to obtain a more compressed speech representation $\\mathbf{z}$ that is aligned in length with the text transcription $\\mathbf{v}$ . Consider that $\\mathbf{v} = [v_{1}, v_{2}, \\dots, v_{N}]$ , $v_{i} \\in \\mathbb{V}$ is a text token sequence with length $N$ , the input and output of the aggregator can be denoted as:", + "bbox": [ + 169, + 90, + 823, + 148 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {z} = \\operatorname {A g g r e g a t o r} (\\boldsymbol {v}, \\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)}), \\text {w h e r e} \\boldsymbol {z} \\in \\mathbb {R} ^ {N \\times d _ {z}}, \\boldsymbol {v} \\in \\mathbb {V} ^ {N}, \\text {a n d} \\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)} \\in \\mathbb {R} ^ {T \\times d _ {h}}.\n$$\n", + "text_format": "latex", + "bbox": [ + 207, + 151, + 785, + 170 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To make the speech representation $z$ text-aligned, we conduct a simple yet effective attention mechanism based on the three inputs. Consider that the original multi-head attention in [43] is denoted as MultiHead( $Q, K, V$ ), our first layer attention in the aggregator takes:", + "bbox": [ + 169, + 174, + 823, + 217 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nQ = \\text {t e x t} v, \\quad K = \\text {e n c o d e r} h ^ {(L)}, \\quad V = \\text {e n c o d e r} h ^ {(l)}.\n$$\n", + "text_format": "latex", + "bbox": [ + 179, + 222, + 813, + 239 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "By doing so, the length of our first multi-head attention output should follow the text transcription $v$ . Note that the query of the following layers becomes the output from the previous layer. 
In addition, intuitions of using the encoder's last hidden representation as keys, and the shallow hidden representation as values can be described as follows: 1) In Transformer-based ASR models, the last hidden states often encode rich speech-text alignment cues; sometimes the cross-attention weight matrices can even be exploited as soft word-alignment maps [10, 35]. 2) The shallow representation has been shown to support high-quality speech reconstruction even when the quantization is applied [7, 8]. Based on the above observations, we design our aggregator that can use the soft attention maps obtained from last encoder representations and the text transcriptions, to aggregate the shallow encoder representations that is beneficial for high-end speech reconstruction.", + "bbox": [ + 169, + 243, + 826, + 382 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "After getting the text-aligned representation, the quantizer $\\mathrm{Quantizer}(\\cdot)$ is adopted to discretize the text-aligned representation. We use the residual vector quantization (RVQ) to allow coarse-to-fine quantization. 
Given the text-aligned speech representation $z$ and the quantizer containing $R$ residual vector quantization layers, we generate:", + "bbox": [ + 169, + 388, + 823, + 445 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {q}, \\hat {\\boldsymbol {z}} = \\operatorname {Q u a n t i z e r} (\\boldsymbol {z}), \\quad \\boldsymbol {q} = \\left[ \\boldsymbol {q} ^ {(1)}, \\boldsymbol {q} ^ {(2)}, \\dots , \\boldsymbol {q} ^ {(R)} \\right], \\quad \\hat {\\boldsymbol {z}} = \\sum_ {r = 1} ^ {R} \\hat {\\boldsymbol {z}} ^ {(r)} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 263, + 449, + 825, + 491 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where each $\\pmb{q}^{(r)} \\in \\mathbb{C}^N$ denotes the $r$ -th layer code sequence with code set $\\mathbb{C}$ ; and the quantized embedding $\\hat{\\pmb{z}}$ to be the summation over each layer of the codebook vectors. Note that both of the code sequence and the quantized speech embedding $\\hat{\\pmb{z}}$ are text-aligned, with the lengths to be $N$ .", + "bbox": [ + 169, + 496, + 823, + 540 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1.2 TASTE Speech Decoder", + "text_level": 1, + "bbox": [ + 171, + 553, + 393, + 568 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The speech decoder aims to perform speech reconstruction conditioned on the text token sequence and the text-aligned speech tokenization. As shown in Figure 2, the text and speech tokens are aligned in lengths and being fed into the speech decoder after weighted sum in an autoregressive manner. 
The speech decoder is composed of the two components: the unit decoder and the unit-to-speech vocoder.", + "bbox": [ + 169, + 577, + 825, + 633 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The unit decoder UnitDecoder $(\\cdot)$ is a Transformer-based decoder that takes the text token sequence $\\mathbf{v}$ and the aligned speech embedding $\\hat{\\mathbf{z}}$ as input and predicts the speech unit $\\mathbf{y}$ for reconstruction:", + "bbox": [ + 169, + 638, + 823, + 667 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\boldsymbol {y} = \\operatorname {U n i t D e c o d e r} (\\hat {\\boldsymbol {z}}, \\boldsymbol {v}). \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 410, + 672, + 823, + 688 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Note that the additional speaker embedding is also taken as input to facilitate global speaker voice control in our spoken language models [16]. After we generating the speech unit $\\mathbf{y}$ , we use a unit-to-speech vocoder to further transform the unit into the reconstructed speech.", + "bbox": [ + 169, + 691, + 823, + 736 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.1.3 Training Objective", + "text_level": 1, + "bbox": [ + 171, + 748, + 357, + 763 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Similar to other reconstruction-based speech tokens [24, 51], we derive TASTE by training it for speech resynthesis. To achieve this, we extract the speech unit $\\pmb{y}^{\\mathrm{target}}$ with length $T'$ from the original speech $u$ as the target unit for our speech tokenizer and speech decoder. 
Given the text transcription $v$ , the TASTE speech embedding $\\hat{z}$ , and the unit from the original speech $\\pmb{y}^{\\mathrm{target}}$ as the target, the speech reconstruction through the tokenizer and the unit decoder parametrized by $\\theta$ under the next prediction schema can be considered as minimizing the cross-entropy loss below:", + "bbox": [ + 169, + 772, + 825, + 856 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {c e}} (\\theta) = \\frac {1}{| T ^ {\\prime} |} \\sum_ {t = 1} ^ {T ^ {\\prime}} - \\log p _ {\\theta} \\left(y _ {t} ^ {\\text {t a r g e t}} \\mid \\hat {\\boldsymbol {z}}, \\boldsymbol {v}; \\boldsymbol {y} _ {< t} ^ {\\text {t a r g e t}}\\right) \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 344, + 873, + 825, + 914 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "On the other hand, we employ the quantization loss as well to tokenize the continuous representation $z$ extracted from the encoder-aggregator. Following prior works [5, 49], given that $z^{(r)}$ is the $r$ -th residual and $\\hat{z}^{(r)}$ indicates the $r$ -th quantized residual, the commitment loss is defined as:", + "bbox": [ + 169, + 90, + 823, + 137 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {r v q}} (\\theta) = \\sum_ {r = 1} ^ {R} \\| z ^ {(r)} - \\hat {z} ^ {(r)} \\|. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 398, + 143, + 825, + 185 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "By summation over both losses, we formulate the overall loss for training TASTE as:", + "bbox": [ + 171, + 196, + 730, + 212 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\text {t a s t e}} = \\mathcal {L} _ {\\mathrm {c e}} + \\mathcal {L} _ {\\mathrm {r v q}}. 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 431, + 219, + 825, + 234 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Note that to allow gradient to back-propagate from the unit decoder through the tokenizer, the straight-through estimation technique is applied towards the quantization process during training.", + "bbox": [ + 169, + 241, + 823, + 270 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2 TASTE for Spoken Language Modeling", + "text_level": 1, + "bbox": [ + 171, + 286, + 488, + 301 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Next, we describe how we conduct effective spoken language modeling with TASTE. Following previous work [11, 33], we perform pre-training on speech data. The text transcription of the speech data is also used for joint speech-text pre-training of our text-aligned spoken language model (TASLM). Since TASTE tokenization already aligns with the text token sequence, we can conduct a straightforward joint modeling, as illustrated in Figure 1. To demonstrate the robustness of TASTE, we perform two types of text-aligned spoken language modeling. First, we build $\\mathrm{TASLM}_{\\mathrm{token}}$ over our text-aligned speech token $q$ , discussed in Section 2.2.1. Then, we show how we build $\\mathrm{TASLM}_{\\mathrm{emb}}$ with our text-aligned speech embedding $\\hat{z}$ , detailed in Section 2.2.2.", + "bbox": [ + 169, + 311, + 826, + 422 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2.1 Modeling TASTE Token", + "text_level": 1, + "bbox": [ + 171, + 436, + 393, + 450 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "As our speech tokens derived from the RVQ quantizer contain $R$ layers of codes, we employ $R$ linear heads for multi-head prediction in our $\\mathrm{TASLM}_{\\mathrm{token}}$ . Namely, the $\\mathrm{TASLM}_{\\mathrm{token}}$ simultaneously predicts the next text token and the corresponding $R$ layers of speech tokens in each step. 
The overall training objective follows the original next token prediction scheme, but with multiple predictions across modalities at each step. Specifically, given the text transcription $v$ and $R$ layers of quantized RVQ codes $q$ , the multi-head next-token prediction training objective can be formulated as:", + "bbox": [ + 169, + 459, + 823, + 544 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {t o k e n}} (\\phi) = \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} \\left(- \\log p _ {\\phi} ^ {\\text {t e x t}} \\left(v _ {i} \\mid \\boldsymbol {v} _ {< i}, \\boldsymbol {q} _ {< i}\\right) + \\sum_ {r = 1} ^ {R} - \\log p _ {\\phi} ^ {(\\mathrm {r})} \\left(q _ {i} ^ {(\\mathrm {r})} \\mid \\boldsymbol {v} _ {< i}, \\boldsymbol {q} _ {< i}\\right)\\right), \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 236, + 551, + 825, + 592 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "with $\\phi$ represents the parameter of the TASLM token, and $p^{(r)}$ is the $r$ -th probability prediction for the $r$ -th RVQ code. As for inference, we directly sample the codes and the text simultaneously, and transform the codes into the corresponding embedding for the speech decoder to generate speech.", + "bbox": [ + 169, + 599, + 823, + 643 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2.2 Modeling TASTE Embedding", + "text_level": 1, + "bbox": [ + 171, + 657, + 433, + 672 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Besides the token code sets, recent progress on latent modeling [17, 28] motivates us to conduct experiments on modeling our text-aligned speech embedding. Referencing MELLE [28], we employ a linear layer that predicts the mean vector $\\mu_{i}$ and a log-magnitude variance vector $\\log \\sigma_i^2$ , where $i$ indicates the $i$ -th frame of the sequence. 
And the final predicted latent of frame $i$ is denoted as $e_i = \\mu_i + \\sigma_i\\odot \\epsilon$ , where $\\epsilon \\sim \\mathcal{N}(0,I)$ . Following MELLE, the straight-through estimator is applied to allow gradients to back-propagate properly during training.", + "bbox": [ + 169, + 681, + 823, + 765 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To facilitate latent prediction, we apply the regularization loss and the Kullback-Leibler (KL) divergence loss driving training, which is described as follows:", + "bbox": [ + 169, + 770, + 826, + 799 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {r e g}} (\\psi) = \\left\\| \\boldsymbol {e} _ {\\psi} - \\hat {\\boldsymbol {z}} \\right\\| _ {2} ^ {2}, \\quad \\mathcal {L} _ {\\mathrm {K L}} = \\frac {1}{2} \\sum_ {i = 1} ^ {N} \\sum_ {j = 1} ^ {d _ {z}} \\left(\\sigma_ {i} [ j ] + \\left(\\mu_ {i} [ j ] - \\hat {z} _ {i} [ j ]\\right) ^ {2}\\right) - 1 - \\log \\sigma_ {i} ^ {2} [ j ]), \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 210, + 806, + 825, + 849 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $\\psi$ indicates the parameter of TASLMemb, and $d_{z}$ is the dimension of our text-aligned embedding $\\hat{z}$ . The regularization loss $\\mathcal{L}_{\\mathrm{reg}}$ is adopted to predict close latent towards the target embedding $\\hat{z}$ . The KL divergence loss calculates the KL divergence between the predicted latent distribution and the target distribution. 
Following MELLE, we select the target distribution to be $\\mathcal{N}(\\hat{z}_i,I)$ .", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This allows simplification of $\\mathcal{L}_{\\mathrm{KL}}$ , which can then be approximated with the predicted vectors $\\mu_i, \\sigma_i$ , and the target embedding $\\hat{z}_i$ . Finally, the overall loss along with the text loss is described as:", + "bbox": [ + 169, + 90, + 823, + 122 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {e m b}} (\\psi) = \\lambda_ {\\mathrm {r e g}} \\cdot \\mathcal {L} _ {\\mathrm {r e g}} + \\lambda_ {\\mathrm {K L}} \\cdot \\mathcal {L} _ {\\mathrm {K L}} + \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} - \\log p _ {\\psi} ^ {\\text {t e x t}} \\left(v _ {i} \\mid \\boldsymbol {v} _ {< i}, \\hat {\\boldsymbol {z}} _ {< i}\\right), \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 264, + 128, + 825, + 170 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\lambda_{\\mathrm{reg}}$ , $\\lambda_{\\mathrm{KL}}$ to be the weighted coefficients of the two losses, respectively.", + "bbox": [ + 169, + 176, + 683, + 194 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Experiment Setup", + "text_level": 1, + "bbox": [ + 171, + 210, + 359, + 229 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Model Configuration", + "text_level": 1, + "bbox": [ + 171, + 242, + 362, + 258 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For our TASTE speech tokenizer, we initialize our encoder from Whisper [35]. Specifically, we use whisper-large-v3 for our initialization. By doing so, we can reduce computational cost between obtaining the ASR transcription and extracting the TASTE tokenization with the TASTE encoder frozen during training. 
On the other hand, we use the S3 token from CosyVoice [7] as the target unit for speech reconstruction. Since their speech tokenization facilitates additional speaker embedding, we follow the same procedure to obtain one. Adding speaker embedding allows global speaker voice control, which is a reasonable and useful scenario for spoken language models. The unit-to-speech vocoder is comprised of a flow model [23, 27] and a HifiGAN. We use the published pre-trained ones from [7], and they are not involved in our training. For the quantizer, we set the RVQ layer $R = 4$ , the codebook size 512, and the codebook dimension to be 256. For the spoken language modeling, we follow previous work [6, 11, 22, 33] and initialize our spoken language model from a text LLM. However, this introduces the vocabulary mismatch problem between the ASR and LLM. We resolve this issue by using word-level TASTE tokenization and embedding, which is detailed in Appendix A.2. Moreover, we conduct Low-Rank Adaptation (LoRA) for parameter-efficient fine-tuning of our TASLMs. We set the corresponding hyperparameters rank $r = 64$ and $\\alpha = 128$ .", + "bbox": [ + 169, + 267, + 826, + 476 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Dataset", + "text_level": 1, + "bbox": [ + 171, + 491, + 267, + 505 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We use two datasets—Emilia and LibriTTS—as our training datasets. Emilia [12] is an in-the-wild dataset where the speech is web-scaled and the transcriptions are pseudo-labeled. We use only the English subset of this multi-lingual corpus, which is about 40,000 hours. LibriTTS [50] is a reading-style corpus based on LibriSpeech [34]. We use all the training splits in LibriTTS for training, which is approximately 600 hours of speech. 
In addition, the test-clean split in LibriSpeech is used for evaluation purposes for our TASTE tokenizer and TASLMs.", + "bbox": [ + 169, + 517, + 826, + 602 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Result", + "text_level": 1, + "bbox": [ + 171, + 619, + 261, + 636 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We separate the evaluation into two phases: Section 4.1 shows the results regarding our TASTE tokenization; while Section 4.2 evaluates our TASLM across multiple aspects, including acoustic, semantic, and continuation. For clarity, the metrics are introduced within each section.", + "bbox": [ + 169, + 651, + 826, + 695 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Results of TASTE Tokenization", + "text_level": 1, + "bbox": [ + 171, + 710, + 433, + 724 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1.1 Speech Reconstruction Evaluation", + "text_level": 1, + "bbox": [ + 171, + 734, + 465, + 751 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We first represent the speech reconstruction evaluation results. For comprehensive evaluation, we use different metrics, including the reference-free metrics for quality assessment, and the reference-based metrics for evaluating the similarity between the reconstructed and the original speech.", + "bbox": [ + 169, + 760, + 823, + 801 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Quality Assessment We use ASR-WER, UTMOS [38], and DNS-MOS [37] as our metrics for evaluating the speech quality. For ASR-WER, we use HuBERT-Large [14] as the ASR model to transcribe the speech, and then calculate the word-error rate (WER) on the transcription. ${}^{1}$ UTMOS and DNS-MOS are both neural-based MOS predictors. 
While both evaluate the speech quality, the design purpose of DNS-MOS makes it more suitable for evaluation regarding the noise levels.", + "bbox": [ + 169, + 816, + 825, + 888 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "", + "bbox": [ + 192, + 897, + 611, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/cd044b89abcba5b2d464085ca59cc04326658efa54fca2b094bce5b36bb2d8b4.jpg", + "table_caption": [ + "Table 1: The speech tokenization evaluation results on the test-clean split of LibriTTS. The evaluation is separated into the QUALITY and the SIMILARITY assessments, as introduced in Section 4.1.1. We use gray text to indicate the worst-performing methods in each metric." + ], + "table_footnote": [], + "table_body": "
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%4.093.84---76.6
Encodec [5]15005.1%1.583.263.460.940.63-
30002.6%2.353.483.810.960.7825.6
SpeechTokenizer [51]5005.2%1.272.992.800.940.35-
20003.0%3.563.603.650.970.8053.9
40002.5%3.903.764.030.980.92-
Mimi [6]10003.1%3.603.603.620.960.8267.6
S3 token (topline) [7]6003.0%4.183.903.300.960.8270.2
Text-only (baseline)~505.9%4.314.112.440.570.7842.6
TASTE (ours)~1504.4%4.294.103.050.910.8068.3
", + "bbox": [ + 173, + 138, + 823, + 318 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Similarity Assessment For similarity, we measure ViSQOL, duration consistency (Drtn. Con.), speaker similarity (Spkr. Sim.) and the MUSHRA test as human evaluation. We use ViSQOL [4] is a production-ready tool that predicts speech quality via spectro-temporal image similarity comparisons. For the duration consistency, we first get the word-level alignment of the transcriptions of the original and the reconstructed speech using Montreal Forced Aligner [26]; then we calculate if the duration between each of the same words is matched under a preset tolerance window, which is set to 50 milliseconds. For MUSHRA human listening test, we reference the original protocol [40] to instruct evaluators to rate the similarity and quality on a scale of 1 to 100 with reference given.", + "bbox": [ + 169, + 345, + 826, + 455 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Speech Reconstruction Results The evaluation results of our speech reconstruction on LibriSpeech are shown in Table 1. We highlight that our TASTE speech tokenization has the lowest bitrate among all the other speech tokenization methods. Note that since the speech tokenization is dynamic in frequency, we estimate our bitrate based on calculating the overall token count and the duration over the testing set. Despite the low bitrate, we generally attain much better performance comparing with the worst performing methods (gray text in the table) across each metric. Moreover, on the quality assessment, our MOS prediction scores are the second highest and even surpasses the ground truth, showcasing the reconstructed speech is of high quality. Next, we focus on the results of the similarity assessment. As for the duration consistency, we score the second-worst performance comparing with the other methods. We attribute this to the fact that our tokenization compress the sequence in a very dynamic way. 
Despite that, we still outperform the text-only method by a large margin, performing close to other speech tokenization methods, which all have a fixed down-sampling rate.
Additional examples that target other paralinguistic dimensions are provided on our demo page.", + "bbox": [ + 169, + 731, + 826, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2f0bfffa21da24cd7b583028f31b507e7681918acc4a32cdc33e9ee74b4bd996.jpg", + "image_caption": [ + "Figure 3: An illustration of TASTE for text-aligned speech editing. On the left shows the process of our text-aligned speech editing. We first extract the TASTE tokens; swap the tokens partially; and then decode the edited TASTE tokens into edited speech. On the right shows an example visualization. Only the durations of the words with exchanged TASTE tokens show significant difference." + ], + "image_footnote": [], + "bbox": [ + 176, + 88, + 426, + 234 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/81e12a75d5eaba0cef27031e19cf6686815234b8941b4228e622e8d418018a9b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 431, + 88, + 823, + 236 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2 Evaluating Text-Aligned Spoken Language Modeling", + "text_level": 1, + "bbox": [ + 171, + 325, + 583, + 343 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To provide a comprehensive evaluation of our text-aligned spoken language modeling (TASLM), we first compare our pre-trained SLM with other methods through speech continuation and likelihood-based benchmarks in Section 4.2.1. 
Then, to further investigate the understanding capabilities, we conduct evaluation with spoken question answering in Section 4.2.2.", + "bbox": [ + 169, + 352, + 826, + 409 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.2.1 Comparing TASLM with Pretrained SLMs", + "text_level": 1, + "bbox": [ + 171, + 422, + 527, + 438 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Speech Continuation Evaluation A typical way to evaluate the pre-trained SLM is by performing conditional generation. Following previous work [11, 22], we use the 3-second prompt speech from the LibriSpeech test-clean. To evaluate the quality of the continuations generated by the SLMs, we employ GPT-4o to assign MOS scores regarding the transcribed speech continuation using ASR, focusing on the semantic coherence of the continuation. In addition, we compute UTMOS as for evaluating the speech quality and naturalness. Last but not least, we conduct human listening test, in which each evaluator is asked to give a MOS score regarding the overall performance of the generated speech continuation. The details of the instructions for GPT-4o and human are in Appendix.", + "bbox": [ + 169, + 446, + 826, + 559 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Likelihood-Based Evaluation Following previous work [11, 22, 33], we also evaluate our SLMs through likelihood-based benchmarks, where the accuracy score is based on whether the model chooses the correct continuation from the two given speech utterances based on its output likelihoods.", + "bbox": [ + 169, + 573, + 826, + 617 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/e45e9d10721807dc88752e032a3bc967c5e37ba7fac2be48a2c5ec4780a4ab95.jpg", + "table_caption": [ + "Table 2: Pretrained SLM speech continuation and likelihood-based next-speech selection results. The superscripts at the bottom of the table indicate the base models used by each SLM, indicated by superscripts. 
Cascade models refer to the pipeline with ASR (whisper-large-v3), text continuation by LMs, and TTS (CosyVoice). This comparison evaluates SLMs and cascade models in continuation evaluation. As shown in the table, TASLM tends to preserve the semantic capabilities of LMs." + ], + "table_footnote": [ + "Base models: $\\alpha$ LLaMA3.2-1B, $\\beta$ LLaMA2-7B, $\\gamma$ LLaMA-7B, $\\theta$ OPT-1.3B" + ], + "table_body": "
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.154.254.00---
Cascade (LLaMA2-7Bβ)-3.434.253.98---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.483.251.9562.561.562.0
TWIST 7B [11]7B / 7Bγ1.443.272.0463.464.764.1
Spirit LM [33]7B / 7Bβ2.793.412.3859.172.065.6
Spirit LM Expr. [33]7B / 7Bβ1.903.402.4169.066.267.6
Baseline (S3 token)45M / 1.3Bα1.374.042.8450.258.754.5
TASLM 1B (token)45M / 1.3Bα3.084.073.9360.876.568.7
TASLM 1B (embed.)45M / 1.3Bα3.164.224.1657.776.767.2
", + "bbox": [ + 173, + 717, + 823, + 895 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We adopt two established benchmarks SALMON [25] and spoken StoryCloze [11, 29], which covers the acoustic aspect and the semantic aspect, respectively. Since both benchmarks contain multiple tasks, we report the average accuracy across these tasks within each benchmark for simplicity. The detailed results are in Appendix A.5.1 for the interested readers. We also report the mean of the SALMON and StoryCloze as an overall assessment for both aspects.", + "bbox": [ + 169, + 90, + 823, + 161 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Results The results of TASLM comparing to other pre-trained SLM are in Table 2. First, we highlight that our TASLMs have achieved significantly higher scores on speech continuation across human and machine evaluations; and good performance on the likelihood-based benchmarks. Note that our base language model contains only 1.3 billion parameters, showing the effectiveness of using TASTE for joint modeling. Compared to the cascade method that has the same base model (first row), our $\\mathrm{TASLM}_{\\mathrm{emb}}$ scores comparable on GPT-4o but better on human MOS. This indicates that our generated speech is more natural than the cascade one that utilizes TTS for synthesis. Next, our TASLM is the only SLM that not only maintains but even surpasses the performance of its corresponding text-base model. Moreover, we demonstrate that directly using the S3 token for joint modeling following [46] does not yield performance comparable in any aspect. 
The result further strengthens the intuition behind TASTE (mitigating the length-mismatch during the tokenization stage facilitates effective joint spoken language modeling).
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.339.0
Mini-Omni 0.5B [46]T+A4.511.6
Helium 7B (text)T32.375.0
Moshi 7B [6]T+A26.662.3
LLaMA3.1-8B-InstructT60.471.7
Llama-Omni-8B [9]T+A35.567.3
LLaMA3.2-1B†T24.051.0
TASLM 1B (embed.)†T+A27.157.6
", + "bbox": [ + 478, + 445, + 823, + 578 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 647, + 302, + 662 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we propose Text-Aligned Speech Tokenization and Embedding (TASTE), to facilitate joint speech-text spoken language modeling. By aggregating proper encoder representation through the specialized cross-attention mechanism and taking the ASR model as initialization, we make the speech tokenization text-aligned in an end-to-end manner with no explicit word alignment required. We conduct extensive evaluation on our TASTE tokenizer. Our results show that TASTE allows high quality speech reconstruction at an extremely low bitrate. With our text-aligned speech tokenization and embedding, joint speech-text modeling becomes straightforward and effective. Our experimental results indicate that TASTE enables turning a text LLM into a spoken one with the simple parameter-efficient finetuning technique applied.", + "bbox": [ + 169, + 679, + 826, + 805 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "**Limitation** Several limitations of our current work point to promising avenues for future research. First, neither our TASTE tokenization nor the text-aligned SLM has been optimized for time efficiency; developing a low-latency, streaming variant remains future work. Second, we have evaluated TASTE only on English data—its portability to other languages deserves thorough investigation. 
Third, although our pretrained SLM generates high-quality continuations, it does not yet support robust turn-taking or instruction-following behavior, both of which are essential for truly interactive systems.", + "bbox": [ + 169, + 818, + 828, + 905 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 106 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. In Advances in Neural Information Processing Systems, 2020.", + "[2] Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. Semantic parsing on freebase from question-answer pairs. In Proceedings of the 2013 conference on empirical methods in natural language processing, 2013.", + "[3] Li-Wei Chen and Alexander Rudnicky. Fine-grained style control in transformer-based text-to-speech synthesis. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2022.", + "[4] Michael Chinen, Felicia SC Lim, Jan Skoglund, Nikita Gureev, Feargus O'Gorman, and Andrew Hines. Visqol v3: An open source production ready objective speech and audio metric. In 2020 twelfth international conference on quality of multimedia experience (QoMEX), 2020.", + "[5] Alexandre Défossez, Jade Copet, Gabriel Synnaeve, and Yossi Adi. High fidelity neural audio compression. Transactions on Machine Learning Research, 2023.", + "[6] Alexandre Défossez, Laurent Mazaré, Manu Orsini, Amélie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. 
arXiv preprint arXiv:2410.00037, 2024.", + "[7] Zhihao Du, Qian Chen, Shiliang Zhang, Kai Hu, Heng Lu, Yexin Yang, Hangrui Hu, Siqi Zheng, Yue Gu, Ziyang Ma, et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024.", + "[8] Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. CoRR, 2024.", + "[9] Qingkai Fang, Shoutao Guo, Yan Zhou, Zhengrui Ma, Shaolei Zhang, and Yang Feng. LlamaOmni: Seamless speech interaction with large language models. CoRR, 2024.", + "[10] Sanchit Gandhi, Patrick von Platen, and Alexander M Rush. Distil-whisper: Robust knowledge distillation via large-scale pseudo labelling. arXiv preprint arXiv:2311.00430, 2023.", + "[11] Michael Hassid, Tal Remez, Tu Anh Nguyen, Itai Gat, Alexis Conneau, Felix Kreuk, Jade Copet, Alexandre Defossez, Gabriel Synnaeve, Emmanuel Dupoux, et al. Textually pretrained speech language models. Advances in Neural Information Processing Systems, 2023.", + "[12] Haorui He, Zengqiang Shang, Chaoren Wang, Xuyuan Li, Yicheng Gu, Hua Hua, Liwei Liu, Chen Yang, Jiaqi Li, Peiyang Shi, et al. Emilia: An extensive, multilingual, and diverse speech dataset for large-scale speech generation. In 2024 IEEE Spoken Language Technology Workshop (SLT), 2024.", + "[13] Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. Liger kernel: Efficient triton kernels for llm training. arXiv preprint arXiv:2410.10989, 2024.", + "[14] Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. 
IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021.", + "[15] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuzhhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021.", + "[16] Zeqian Ju, Yuancheng Wang, Kai Shen, Xu Tan, Detai Xin, Dongchao Yang, Yanqing Liu, Yichong Leng, Kaitao Song, Siliang Tang, et al. Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. International Conference on Machine Learning, 2024." + ], + "bbox": [ + 173, + 112, + 826, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[17] Jaehyeon Kim, Keon Lee, Seungjun Chung, and Jaewoong Cho. Clam-tts: Improving neural codec language model for zero-shot text-to-speech. *ICLR*, 2024.", + "[18] Taku Kudo. Subword regularization: Improving neural network translation models with multiple subword candidates. Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2018.", + "[19] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and tokenizer for neural text processing. Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2018.", + "[20] Rithesh Kumar, Prem Seetharaman, Alejandro Luebs, Ishaan Kumar, and Kundan Kumar. High-fidelity audio compression with improved rvqgan. Advances in Neural Information Processing Systems, 2023.", + "[21] Kushal Lakhotia, Eugene Kharitonov, Wei-Ning Hsu, Yossi Adi, Adam Polyak, Benjamin Bolte, Tu-Anh Nguyen, Jade Copet, Alexei Baevski, Abdelrahman Mohamed, et al. On generative spoken language modeling from raw audio. 
Transactions of the Association for Computational Linguistics, 2021.", + "[22] Guan-Ting Lin, Prashanth Gurunath Shivakumar, Aditya Gourav, Yile Gu, Ankur Gandhe, Hung-yi Lee, and Ivan Bulyko. Align-slm: Textless spoken language models with reinforcement learning from ai feedback. arXiv preprint arXiv:2411.01834, 2024.", + "[23] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. The Eleventh International Conference on Learning Representations, 2022.", + "[24] Alexander H Liu, Sang-gil Lee, Chao-Han Huck Yang, Yuan Gong, Yu-Chiang Frank Wang, James R Glass, Rafael Valle, and Bryan Catanzaro. Uniwav: Towards unified pre-training for speech representation learning and generation. The Thirteenth International Conference on Learning Representations, 2025.", + "[25] Gallil Maimon, Amit Roth, and Yossi Adi. Salmon: A suite for acoustic language model evaluation. arXiv preprint arXiv:2409.07437, 2024.", + "[26] Michael McAuliffe, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. Montreal forced aligner: Trainable text-speech alignment using kaldi. In Interspeech 2017, 2017.", + "[27] Shivam Mehta, Ambika Kirkland, Harm Lameris, Jonas Beskow, Éva Székely, and Gustav Eje Henter. Overflow: Putting flows on top of neural transducers for better tts. Interspeech 2023, 2022.", + "[28] Lingwei Meng, Long Zhou, Shujie Liu, Sanyuan Chen, Bing Han, Shujie Hu, Yanqing Liu, Jinyu Li, Sheng Zhao, Xixin Wu, et al. Autoregressive speech synthesis without vector quantization. CoRR, 2024.", + "[29] Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and evaluation framework for deeper understanding of commonsense stories. 
Proceedings of NAACL-HLT, 2016.", + "[30] Eliya Nachmani, Alon Levkovitch, Roy Hirsch, Julian Salazar, Chulayuth Asawaroengchai, Soroosh Mariooryad, Ehud Rivlin, RJ Skerry-Ryan, and Michelle Tadmor Ramanovich. Spoken question answering and speech continuation using spectrogram-powered llm. In The Twelfth International Conference on Learning Representations, 2024.", + "[31] Tu Anh Nguyen, Maureen de Seyssel, Patricia Rozé, Morgane Rivière, Evgeny Kharitonov, Alexei Baevski, Ewan Dunbar, and Emmanuel Dupoux. The zero resource speech benchmark 2021: Metrics and baselines for unsupervised spoken language modeling. NeuRIPS Workshop on Self-Supervised Learning for Speech and Audio Processing, 2020." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] Tu Anh Nguyen, Eugene Kharitonov, Jade Copet, Yossi Adi, Wei-Ning Hsu, Ali Elkahky, Paden Tomasello, Robin Algayres, Benoit Sagot, Abdelrahman Mohamed, et al. Generative spoken dialogue language modeling. Transactions of the Association for Computational Linguistics, 2023.", + "[33] Tu Anh Nguyen, Benjamin Muller, Bokai Yu, Marta R Costa-Jussa, Maha Elbayad, Sravya Popuri, Christophe Ropers, Paul-Ambroise Duquenne, Robin Algayres, Ruslan Mavlyutov, et al. Spirit-lm: Interleaved spoken and written language model. Transactions of the Association for Computational Linguistics, 2025.", + "[34] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: An asr corpus based on public domain audio books. In 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2015.", + "[35] Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. 
In International conference on machine learning, 2023.", + "[36] Jeff Rasley, Samyam Rajbhandari, Olatunjri Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery and data mining, 2020.", + "[37] Chandan KA Reddy, Vishak Gopal, and Ross Cutler. Dnsmos: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2021.", + "[38] Takaaki Saeki, Detai Xin, Wataru Nakata, Tomoki Koriyama, Shinnosuke Takamichi, and Hiroshi Saruwatari. Utmos: Utokyo-sarulab system for voicemos challenge 2022. Interspeech 2022, 2022.", + "[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2016.", + "[40] B Series. Method for the subjective assessment of intermediate quality level of audio systems. International Telecommunication Union Radiocommunication Assembly, 2014.", + "[41] Hubert Siuzdak, Florian Grötschla, and Luca A Lanzendorfer. Snac: Multi-scale neural audio codec. Audio Imagination: NeurIPS 2024 Workshop AI-Driven Speech, Music, and Sound Generation, 2024.", + "[42] Hsiang-Sheng Tsai, Heng-Jui Chang, Wen-Chin Huang, Zili Huang, Kushal Lakhotia, Shu-wen Yang, Shuyan Dong, Andy T Liu, Cheng-I Jeff Lai, Jiatong Shi, et al. Superb-sg: Enhanced speech processing universal performance benchmark for semantic and generative capabilities. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics, 2022.", + "[43] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
Advances in neural information processing systems, 2017.", + "[44] Apoorv Vyas, Bowen Shi, Matthew Le, Andros Tjandra, Yi-Chiao Wu, Baishan Guo, Jiemin Zhang, Xinyue Zhang, Robert Adkins, William Ngan, et al. Audiobox: Unified audio generation with natural language prompts. arXiv preprint arXiv:2312.15821, 2023.", + "[45] Chengyi Wang, Sanyuan Chen, Yu Wu, Ziqiang Zhang, Long Zhou, Shujie Liu, Zhuo Chen, Yanqing Liu, Huaming Wang, Jinyu Li, et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023.", + "[46] Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[47] Detai Xin, Xu Tan, Kai Shen, Zeqian Ju, Dongchao Yang, Yuancheng Wang, Shinnosuke Takamichi, Hiroshi Saruwatari, Shujie Liu, Jinyu Li, et al. Rall-e: Robust codec language modeling with chain-of-thought prompting for text-to-speech synthesis. arXiv preprint arXiv:2404.03204, 2024.", + "[48] Shu-wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y Lin, Andy T Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, et al. Superb: Speech processing universal performance benchmark. Interspeech 2021, 2021.", + "[49] Neil Zeghidour, Alejandro Luebs, Ahmed Omran, Jan Skoglund, and Marco Tagliasacchi. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021.", + "[50] Heiga Zen, Viet Dang, Rob Clark, Yu Zhang, Ron J Weiss, Ye Jia, Zhifeng Chen, and Yonghui Wu. Libritts: A corpus derived from librispeech for text-to-speech. Interspeech 2019, 2019.", + "[51] Xin Zhang, Dong Zhang, Shimin Li, Yaqian Zhou, and Xipeng Qiu. 
Speechtokenizer: Unified speech tokenizer for speech large language models. ICLR, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 325 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "A Technical Appendices and Supplementary Material", + "text_level": 1, + "bbox": [ + 171, + 89, + 638, + 107 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.1 Related Work", + "text_level": 1, + "bbox": [ + 171, + 119, + 316, + 133 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Recent SLMs often require speech tokenization to conduct language modeling with the next prediction objective as the text LLMs. Unlike text, the speech signal is continuous and lengthy, making it difficult to derive proper speech tokenization for spoken language modeling. Common approaches may utilize self-supervised learned (SSL) speech models followed by quantization techniques to extract speech tokens [1, 11, 14, 21, 33]. In addition, audio or speech codec models have also been used for tokenization in recent SLMs [5, 6, 49, 51]. These models are designed for resynthesis, where the speech decoders are jointly learned with the encoders, making them easy to use for spoken language modeling.", + "bbox": [ + 169, + 145, + 826, + 256 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "With speech tokenization, GSLM [21, 32] first demonstrates the possibility of building an SLM that can generate speech. TWIST [11] further shows that SLM can benefit from initialization with the text-pretrained LLM. With regard to the huge success of text-only LLMs, recent work shifts the focus towards joint speech-text modeling [6, 11, 46]. Challenged by the modality gap between speech and text tokens, different techniques are introduced to facilitate joint modeling. Spirit LM [33] adopts an interleaving strategy; moshi [6] trains its own tokenizer with a reduced token frequency. 
Moreover, different patterns and strategies such as delayed or sequential generation are introduced for joint modeling, aiming for more reasonable and coherent speech outputs [46].", + "bbox": [ + 169, + 262, + 826, + 376 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Despite the increasing demand of joint speech-text modeling [6, 33, 46], we do not find any work discussing the effectiveness of current speech tokenization for it. Moreover, the speech token is often derived with speech or audio-only data2. Nonetheless, we observe that recent work is trying to mitigate the modality gap by reducing frequency speech token or conducting additional training stage for text-speech alignment. This motivates us to design a speech tokenization that is directly aligned with its text counterpart, tackling the mismatch issue during the tokenization stage.", + "bbox": [ + 169, + 380, + 826, + 464 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In the main text, we have mentioned that we utilize a specialized mechanism based on attention to extract and aggregate the encoder representations. We clarify that the text-speech cross-attention mechanism has also been used for fine-grained control of text-to-speech synthesis (TTS). More specifically, Chen and Rudnicky [3] propose content-style cross-attention to indicate their text-speech cross-attention mechanism that enables style transfer in TTS. Although both utilize specialized text-speech cross-attention mechanism, the design choices and problem formulations are completely different. We attribute of our main novelty to inventing a text-aligned speech tokenization and embedding for joint spoken language modeling, and the text-speech cross attention mechanism is considered and shown to be a clean, effective, and straightforward way of achieving it.", + "bbox": [ + 169, + 469, + 826, + 595 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "2An exception is CosyVoice [7]. 
We discuss it in Section 2 since it is related to our method.", + "bbox": [ + 189, + 896, + 732, + 911 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "A.2 Tackling the Vocabulary Mismatch", + "text_level": 1, + "bbox": [ + 171, + 90, + 460, + 106 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The vocabulary mismatch problem lies in the fact that the vocabulary sets are different between the ASR and the LLM, and TASTE is aligned with the text transcription tokens from ASR. Consider that given a text transcription $\\mathbf{v}$ and the vocabulary sets of ASR and LLM denoted as $\\mathbb{V}^{\\mathrm{asr}}$ and $\\mathbb{V}^{\\mathrm{llm}}$ , the ASR tokenized sequence $\\mathbf{v}^{\\mathrm{asr}} = [v_1^{\\mathrm{asr}}, v_2^{\\mathrm{asr}}, \\dots, v_N^{\\mathrm{asr}}]$ , $v_i^{\\mathrm{asr}} \\in \\mathbb{V}^{\\mathrm{asr}}$ and the LLM tokenized sequence $\\mathbf{v}^{\\mathrm{llm}} = [v_1^{\\mathrm{llm}}, v_2^{\\mathrm{llm}}, \\dots, v_M^{\\mathrm{llm}}]$ , $v_i^{\\mathrm{llm}} \\in \\mathbb{V}^{\\mathrm{llm}}$ can be different in terms of token ids and sequence lengths. Since the TASTE token and embedding are aligned with $\\mathbf{v}^{\\mathrm{asr}}$ , we need to derive a method to align them with $\\mathbf{v}^{\\mathrm{llm}}$ for text-aligned speech-text modeling. Notice that $\\mathbf{v}^{\\mathrm{asr}}$ and $\\mathbf{v}^{\\mathrm{llm}}$ both represent $\\mathbf{v}$ , we propose to mitigate the issue through word-level grouping, averaging, and aligning, detailed in Algorithm 1. By crafting TASTE speech tokenization into the word level, we are able to align it with the text tokens of the LLM, denoted as $\\tilde{\\mathbf{q}}, \\tilde{\\mathbf{z}}$ . 
In practice, we also adopt the word-level averaging technique during the TASTE tokenization training phase, ensuring that the word-level TASTE tokenization facilitates high-quality reconstruction.", + "bbox": [ + 169, + 133, + 826, + 303 + ], + "page_idx": 14 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Aligning TASTE with LLM Tokenization via Word-Level Techniques" + ], + "code_body": "1: Initialization: Text transcription $\\pmb{v} = [\\mathrm{word}_1,\\mathrm{word}_2,\\dots ,\\mathrm{word}_W]$ ASR tokens of the transcription $v^{\\mathrm{asr}} = [v_1^{\\mathrm{asr}},v_2^{\\mathrm{asr}},\\dots ,v_N^{\\mathrm{asr}}]$ TASTE embedding $\\hat{z} = [\\hat{z}_1,\\hat{z}_2,\\dots ,\\hat{z}_N]$ LLM tokens of the transcription $v^{\\mathrm{llm}} = [v_1^{\\mathrm{llm}},v_2^{\\mathrm{llm}},\\dots ,v_M^{\\mathrm{llm}}]$ \n2: procedure WORDLEVELGROUPING(v, $v^{\\mathrm{asr}},\\hat{z},v^{\\mathrm{llm}})$ \n3: Since $v^{\\mathrm{asr}}$ is a token sequence represents $\\mathbf{v}$ we can easily group it by words: \n4: $\\pmb{v}_{\\mathrm{grouped}}^{\\mathrm{asr}}\\leftarrow \\underbrace{\\left[(v_{1}^{\\mathrm{asr}},v_{2}^{\\mathrm{asr}},v_{3}^{\\mathrm{asr}})_{1},(v_{4}^{\\mathrm{asr}})_{2},\\ldots,(v_{N - 1}^{\\mathrm{asr}},v_{N}^{\\mathrm{asr}})_{W}\\right]}_{\\text{word}_1}$ ▷Group $v^{\\mathrm{asr}}$ by the words of $\\pmb{v}$ \n5: With the word-level grouping from $v^{\\mathrm{asr}}$ , we can group TASTE embedding $\\hat{z}$ as well: \n6: $\\hat{z}_{\\mathrm{grouped}}\\gets \\left[(\\hat{z}_1,\\hat{z}_2,\\hat{z}_3)_1,(\\hat{z}_4)_2,\\dots,(\\hat{z}_{N - 1},\\hat{z}_N)W\\right]$ \n7: Finally, we can group $v^{\\mathrm{llm}}$ following the similar procedure of grouping $v^{\\mathrm{asr}}$ . 
\n8: $\\pmb{v}_{\\mathrm{grouped}}^{\\mathrm{llm}}\\leftarrow \\underbrace{\\left[(v_{1}^{\\mathrm{llm}},v_{2}^{\\mathrm{llm}})_{1},(v_{3}^{\\mathrm{llm}},v_{4}^{\\mathrm{llm}})_{2},\\dots,(v_{M - 2}^{\\mathrm{llm}},v_{M - 1}^{\\mathrm{llm}},v_{M}^{\\mathrm{llm}})_{W}\\right]}_{\\text{word}_2}$ \n9: Due to the vocabulary mismatch, the grouping of $v^{\\mathrm{llm}}$ is different from $v^{\\mathrm{asr}}$ , $\\hat{z}_{\\mathrm{grouped}}$ \n10: end procedure \n11: procedure WORDLEVELAVERAGING( $\\hat{z}_{\\mathrm{grouped}}$ ) \n12: $\\bar{z}\\gets []$ ▷ Initialize a new sequence \n13: for word group index $i\\gets 1$ to $W$ do \n14: word group $(\\hat{z}_j,\\dots,\\hat{z}_k)\\gets \\hat{z}_{\\mathrm{grouped}}[i]$ \n15: $\\bar{z}_{[j:k]}\\gets \\text{Average} ((\\hat{z}_j,\\dots,\\hat{z}_k))$ ▷Average the word group \n16: append $\\bar{z}_{[j:k]}$ to $\\bar{z}$ \n17: end for \n18: Resulting in word-level TASTE embedding $\\bar{z}\\in \\mathbb{R}^{W\\times d_z}$ , where $W$ is the word length of $V$ \n19: end procedure \n20: procedure ALIGNWORDLEVELEMBEDGINGWITHLLM( $\\bar{z},v_{\\mathrm{grouped}}^{\\mathrm{llm}}$ ) \n21: $\\tilde{z}\\gets []$ ▷ Initialize a new sequence \n22: for word group index $i\\gets 1$ to $W$ do \n23: word group $(v_j^{\\mathrm{llm}},\\dots,v_k^{\\mathrm{llm}})\\gets v_{\\mathrm{grouped}}[i]$ \n24: $M\\gets \\text{Length}((v_j^{\\mathrm{llm}},\\dots,v_k^{\\mathrm{llm}}))$ ▷Get the length of the word group. 
\n25: for $m\\gets 1$ to $M$ do ▷add $M\\times \\bar{z}[i]$ into the aligned sequence $\\tilde{z}$ \n26: append $\\tilde{z}[i]$ to $\\tilde{z}$ \n27: end for \n28: end for \n29: end procedure \n30: return The LLM-aligned word-level TASTE embedding $\\tilde{z}$ and its codes form $\\tilde{\\boldsymbol{q}}$", + "bbox": [ + 173, + 375, + 826, + 906 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A.3 Training Details", + "text_level": 1, + "bbox": [ + 171, + 90, + 330, + 104 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We separate the training process into the two phases: deriving TASTE tokenization and conducting spoken language modeling with TASTE. In the tokenization phase, only the Aggregator, Quantizer, and the UnitDecoder is trainable. We use the Adam optimizer and the learning rate is set to 0.0016. The batch size is set to 160 seconds on each of the 8 NVIDIA A6000 GPUs we used. Note that in the first 2 epochs the quantization is not applied. From the beginning of the third epoch, quantization is applied and the Quantizer starts to be updated. We train the TASTE tokenizer for 5 epochs, which takes about 2 days for learning, with the learning rate gradually decayed.", + "bbox": [ + 169, + 119, + 826, + 218 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "As for the spoken language modeling training phase, we use the AdamW optimizer, the Consine scheduler with the learning rate set to 1e-5. We use 8 Nvidia A6000 GPUs for training. The total batch size summation over the GPUs is set to 768 samples with the gradient accumulation steps set to 2. To reduce the memory overhead and the computational cost, we employ bfloat16 mixed precision during training. 
Tools such as DeepSpeed [36] and Liger Kernel [13] are also applied to speed up the fine-tuning process.", + "bbox": [ + 169, + 223, + 823, + 308 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.4 Evaluation Details", + "text_level": 1, + "bbox": [ + 171, + 332, + 344, + 345 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.4.1 Human Evaluation", + "text_level": 1, + "bbox": [ + 171, + 359, + 361, + 375 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We conduct human listening tests through Amazon Mechanical Turk. In each experiment, we randomly select the same 20 samples from each method; and for each sample we collect more than 10 evaluation scores across different human evaluators.", + "bbox": [ + 169, + 388, + 823, + 429 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "MUSHRA In Table 1, we have shown our result of the MUSRHA human listening test [40]. Following [51], we conduct the evaluation with a hidden reference but without a lowerpass-filtered anchor. We instruct evaluators to rate the perceptual quality of the given samples with respect to the ground truth on a scale of 1 to 100.", + "bbox": [ + 169, + 453, + 826, + 510 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Speech Continuation MOS In Table 2, we mention that we have conducted the human listening test to evaluate the overall performance of the speech continuations. 
Here, we present the instruction for human speech continuation MOS evaluation as follows:", + "bbox": [ + 169, + 532, + 823, + 575 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Instruction for Human Speech Continuation MOS Evaluation", + "text_level": 1, + "bbox": [ + 197, + 593, + 602, + 608 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "In this test, each sample will contain a short audio clip called \"prompt\" (3 seconds) and a longer audio clip called \"prompt+continuation\" ( $\\sim$ 15 seconds).", + "bbox": [ + 196, + 619, + 797, + 646 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "You will be asked to rate the speech quality of the \"prompt+continuation\" audio clip, specifically focus on the \"continuation\" part.", + "bbox": [ + 196, + 646, + 797, + 672 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The rating should be based on how likely you think that the long audio is a proper continuation of the \"prompt\" audio.", + "bbox": [ + 196, + 672, + 797, + 696 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Specifically, the rating should be based on the following scale:", + "bbox": [ + 196, + 696, + 566, + 710 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1: Bad - The \"continuation\" is not distinguishable or not natural.", + "2: Poor - The \"continuation\" is $25\\%$ distinguishable.", + "3: Fair - The \"continuation\" is $50\\%$ distinguishable and natural.", + "4: Good - The \"continuation\" is $75\\%$ distinguishable and natural.", + "5: Excellent - The \"continuation\" is distinguishable, meaningful, and natural." 
+ ], + "bbox": [ + 196, + 734, + 653, + 797 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Distinguishable means that the words in the \"continuation\" is distinguishable.", + "bbox": [ + 196, + 821, + 661, + 835 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Natural means that the \"continuation\" sounds like a real human voice and a natural continuation of the prompt without considering the content of the speech.", + "bbox": [ + 196, + 835, + 797, + 861 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Meaningful means that you can not only distinguish the words but also understand the meaning of the whole \"prompt+continuation\".", + "bbox": [ + 196, + 861, + 797, + 886 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A.4.2 GPT-4o for MOS Evaluation", + "text_level": 1, + "bbox": [ + 171, + 90, + 429, + 104 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "As introduced in Section 4.2.1, we use GPT-4o to assign MOS scores to the speech continuation results. Here, we describe the detailed procedure. First, whisper-large-v3 is applied to transcribe the generated speech. Then, given the transcription, the text content from the prompt audio, and the instruction template, GPT-4o can produce a score between 1 and 5. The instruction template is provided below:", + "bbox": [ + 169, + 114, + 823, + 184 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Instruction Prompt for GPT-4o MOS Evaluation", + "text_level": 1, + "bbox": [ + 197, + 195, + 516, + 210 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The task is evaluating the relevance and likelihood of the predicted text continuation, given the text prompt. You should also consider whether the meaning of the text continuation is making sense. 
The text prompt is:", + "bbox": [ + 196, + 222, + 769, + 261 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "{\"prompt\"}", + "bbox": [ + 197, + 261, + 277, + 273 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": ", and the text continuation is :", + "bbox": [ + 199, + 273, + 446, + 284 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "{\"content\"}", + "bbox": [ + 199, + 285, + 285, + 297 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You must give an overall rating from 1 to 5. The rating guideline is as below:", + "bbox": [ + 197, + 310, + 754, + 335 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "1: The text continuation is very unlikely and irrelevant to the text prompt.", + "bbox": [ + 197, + 335, + 789, + 349 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "2: The text continuation is unlikely and marginally relevant to the text prompt.", + "bbox": [ + 197, + 349, + 761, + 375 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "3: The text continuation is moderately likely and relevant to the text prompt.", + "bbox": [ + 197, + 375, + 746, + 398 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "4: The text continuation is likely and relevant to the text prompt.", + "bbox": [ + 197, + 400, + 720, + 412 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "5: The text continuation is very likely and highly relevant.", + "bbox": [ + 197, + 412, + 666, + 424 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You should take the following steps to provide the score:", + "bbox": [ + 197, + 425, + 635, + 436 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "First: briefly analyze the sample with the above definition.", + "bbox": [ + 197, + 438, + 666, + 449 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Second: MUST follow the output format as: I would rate the score as _", + "bbox": [ + 197, + 450, + 746, + 462 + ], + "page_idx": 16 + }, + { + 
"type": "text", + "text": "A.5 Additional Results", + "text_level": 1, + "bbox": [ + 171, + 500, + 346, + 513 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.5.1 Details on SALMON and StoryCloze", + "text_level": 1, + "bbox": [ + 171, + 525, + 485, + 541 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Our detailed results on SALMON and StoryCloze are reported in Table 4. The introductions of the two benchmarks—SALMON and StoryCloze—are described below.", + "bbox": [ + 169, + 550, + 823, + 579 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "SALMON for Acoustic Evaluation SALMON offers a comprehensive set of metrics designed to evaluate SLMs in multiple dimensions. In summary, each test sample consists of a positive sample and a negative sample. The negative sample differs from the positive sample by having some segments altered. These alterations include changes in speaker, gender, environment (e.g., room acoustics), or sentiment in the middle of the utterance. The SLM serves as an anomaly detector that aims to distinguish between the pairs of positive and negative samples. The distinction is based on", + "bbox": [ + 169, + 593, + 823, + 678 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/082bb6aa5d9a46479ef69e67a1c255233b2ead7447bd21827e559bc7215a53a7.jpg", + "table_caption": [ + "Table 4: The evaluation results on SALMON and StoryCloze of different SLMs, and BG means background. We report likelihood-based accuracy on SALMON (acoustic aspect) and StoryCloze (semantic aspect). The baseline (S3 token) is conducted by joint speech-text modeling with the S3 token as speech tokenization." + ], + "table_footnote": [], + "table_body": "
METHODLoRASALMON (ACOUSTIC CONSISTENCY)STORYCLOZE
SentimentSpeakerGenderRoomBG (domain)BG (rand.)sSC / tSC
Previous Work
TWIST 1.3B ([11])X61.5±3.469.0±3.369.5±3.359.0±3.555.5±3.560.5±3.552.4±0.8 / 70.6±0.7
TWIST 7B ([11])X61.5±3.471.0±3.270.0±3.262.0±3.455.5±3.560.5±3.555.3±0.8 / 74.1±0.7
Spirit LM ([33])X54.5±3.569.5±3.367.0±3.354.5±3.553.5±3.555.5±3.561.0±0.8 / 82.9±0.6
Spirit LM Expr. ([33])X73.5±3.181.0±2.885.0±2.554.5±3.556.0±3.564.0±3.456.9±0.8 / 75.4±0.7
Ours
Baseline (S3 token)49.5±3.548.8±3.548.8±3.549.5±3.555.3±3.549.5±3.554.4±0.8 / 63.0±0.8
TASLM 1B (token)59.0±3.568.0±3.370.5±3.261.0±3.452.0±3.554.0±3.564.2±0.8 / 88.9±0.5
TASLM 1B (embedding)57.5±3.567.0±3.375.5±3.050.0±3.547.0±3.549.0±3.564.0±0.8 / 89.5±0.5
", + "bbox": [ + 173, + 765, + 823, + 907 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "the likelihood score given by each SLM, which is then evaluated with the overall precision between the ground truth and the prediction.", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "StoryCloze for Semantic Evaluation To evaluate the SLMs' ability to comprehend semantic coherence and logical reasoning, we employ the spoken version of StoryCloze test (sSC) and the Topic StoryCloze test (tSC) assembled by ([11]). Assessment of narrative understanding involves presenting a four-sentence story setup, followed by two possible endings. These tasks require the model to select the most appropriate conclusion, thereby testing its grasp of causal and temporal relationships within a narrative. Similarly to SALMON, we measure the accuracy of the distinctions based on the likelihood scores.", + "bbox": [ + 169, + 133, + 826, + 232 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.5.2 Report of Standard Deviations", + "text_level": 1, + "bbox": [ + 171, + 246, + 444, + 262 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We report the standard deviations of our tables in the main text to allow further investigation.", + "bbox": [ + 169, + 268, + 781, + 286 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/c37da4e246327bdeb5e0364e3b1c15c3f51d38c47be643ae90b43b1c67a0812a.jpg", + "table_caption": [ + "Table 5: Results with standard deviations of Table 1" + ], + "table_footnote": [], + "table_body": "
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%±0.074.09±0.323.84±0.26---76.6±15.9
Encodec [5]15005.1%±0.111.58±0.343.26±0.243.46±0.280.94±0.0030.63±0.10-
30002.6%±0.082.35±0.533.48±0.253.81±0.270.96±0.0020.78±0.0725.6±18.6
SpeechTokenizer [51]5005.2%±0.111.27±0.052.99±0.172.80±0.240.94±0.0030.35±0.09-
20003.0%±0.083.56±0.433.60±0.283.65±0.220.97±0.0020.80±0.0653.9±22.9
40002.5%±0.083.90±0.363.76±0.284.03±0.170.98±0.0020.92±0.04-
Mimi [6]10003.1%±0.093.60±0.373.60±0.303.62±0.260.96±0.0020.82±0.0667.6±19.8
S3 token (topline) [7]6003.0%±0.094.18±0.273.90±0.243.30±0.260.96±0.0020.82±0.0970.2±17.0
Text-only (baseline)~505.9%±0.114.31±0.164.11±0.222.44±0.230.57±0.0060.78±0.0942.6±27.1
TASTE (ours)~1504.4%±0.114.29±0.184.10±0.223.05±0.260.91±0.0030.80±0.0868.3±17.1
", + "bbox": [ + 173, + 319, + 823, + 484 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/1b178f39d74600bee181610c32c18474ca8adc9c9bfb2aa49ac9649a63e89cf4.jpg", + "table_caption": [ + "Table 6: Results with standard deviations of Table 2." + ], + "table_footnote": [ + "Base models: $\\alpha$ LLaMA3.2-1B, $\\beta$ LLaMA2-7B, $\\gamma$ LLaMA-7B, $\\theta$ OPT-1.3B" + ], + "table_body": "
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.15±1.274.25±0.224.00±1.28---
Cascade (LLaMA2-7Bβ)-3.43±1.274.25±0.253.98±1.29---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.48±0.703.25±0.481.95±1.0162.5±1.461.5±0.562.0±0.7
TWIST 7B [11]7B / 7Bγ1.44±0.703.27±0.522.04±0.9163.4±1.464.7±0.564.1±0.7
Spirit LM [33]7B / 7Bβ2.79±1.063.41±0.192.38±0.8159.1±1.472.0±0.565.6±0.7
Spirit LM Expr. [33]7B / 7Bβ1.90±1.033.40±0.302.41±0.9669.0±1.366.2±0.567.6±0.7
Baseline (S3 token)45M / 1.3Bα1.37±0.874.04±0.272.84±1.1150.2±1.458.7±0.654.5±0.8
TASLM 1B (token)45M / 1.3Bα3.08±1.374.07±0.283.93±1.3060.8±1.476.5±0.568.7±0.7
TASLM 1B (embed.)45M / 1.3Bα3.16±1.334.22±0.214.16±1.2057.7±1.476.7±0.567.2±0.7
", + "bbox": [ + 173, + 527, + 823, + 691 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/1a8ccabf79ad97b3b3d47d64ea800c7d4361d2c185e0e4c15151ce2ffc4f4b4e.jpg", + "table_caption": [ + "Table 7: Results with standard deviations of Table 3." + ], + "table_footnote": [ + "We apply few-shot learning to facilitate question answering." + ], + "table_body": "
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.3±0.939.0±2.8
Mini-Omni 0.5B [46]T+A4.5±0.511.6±1.8
Helium 7B (text)T32.3±1.075.0±2.5
Moshi 7B [6]T+A26.6±1.062.3±2.8
LLaMA3.1-8B-InstructT60.4±1.171.7±2.6
Llama-Omni-8B [9]T+A35.5±1.167.3±2.7
LLaMA3.2-1B†T24.0±0.951.0±2.9
TASLM 1B (embed.)†T+A27.1±1.057.6±2.9
", + "bbox": [ + 321, + 750, + 676, + 878 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_model.json b/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9f9a64906dfe44b5a2e3630a079fff43de4848d6 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_model.json @@ -0,0 +1,2777 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.214, + 0.122, + 0.784, + 0.175 + ], + "angle": 0, + "content": "TASTE: Text-Aligned Speech Tokenization and Embedding for Spoken Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.224, + 0.812, + 0.243 + ], + "angle": 0, + "content": "Liang-Hsuan Tseng\\*23 Yi-Chang Chen\\*1 Kuan-Yi Lee\\*23 Da-Shan Shiu\\*1 Hung-yi Lee\\*3" + }, + { + "type": "text", + "bbox": [ + 0.354, + 0.254, + 0.644, + 0.269 + ], + "angle": 0, + "content": "*Equal contribution 1MediaTek Research" + }, + { + "type": "text", + "bbox": [ + 0.282, + 0.269, + 0.715, + 0.285 + ], + "angle": 0, + "content": "\\(^{2}\\)Internship at MediaTek Research \\(^{3}\\)National Taiwan University" + }, + { + "type": "text", + "bbox": [ + 0.327, + 0.297, + 0.672, + 0.311 + ], + "angle": 0, + "content": "{yi-chang.chen, ds.shiu}@mtkresearch.com" + }, + { + "type": "text", + "bbox": [ + 0.31, + 0.312, + 0.688, + 0.325 + ], + "angle": 0, + "content": "{f11921067,b10901091,hungyilee}@ntu.edu.tw" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.361, + 0.538, + 0.377 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.392, + 0.769, + 0.656 + ], + "angle": 0, + "content": "Recent efforts target spoken language models (SLMs) that not only listen but also speak for more natural human-LLM interaction. 
Joint speech-text modeling is a promising direction to achieve this. However, the effectiveness of recent speech tokens for joint modeling remains underexplored. To address this, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a method that directly addresses the modality gap by aligning speech token with the corresponding text transcription during the tokenization stage. We propose a method that can achieve this through a attention-based aggregation mechanism and with speech reconstruction as the training objective. We conduct extensive experiments and show that TASTE can preserve essential paralinguistic information while dramatically reducing the token sequence length. With TASTE, we perform straightforward joint spoken language modeling by using Low-Rank Adaptation on the pre-trained text LLM. Experimental results show that TASTE-based SLMs perform comparable to previous work on SALMON and StoryCloze; while significantly outperform other pre-trained SLMs on speech continuation across subjective and objective evaluations. To our knowledge, TASTE is the first end-to-end approach that utilizes a reconstruction objective to automatically learn a text-aligned speech tokenization and embedding suitable for spoken language modeling. Our demo, code, and model are available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.682, + 0.314, + 0.699 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.714, + 0.828, + 0.827 + ], + "angle": 0, + "content": "Learning a speech representation suitable for universal speech-processing tasks has long been a significant challenge [31, 42, 48]. Unlike text—which can be encoded discretely [18, 19, 39]—speech is a continuous waveform carrying layered information (acoustic, semantic, prosodic, etc.). 
Recent neural self-supervised learning (SSL) methods move beyond filter-banks and MFCCs to encode raw audio into compact, high-dimensional embeddings that excel on discriminative tasks such as automatic speech recognition (ASR), emotion recognition, and speaker verification. Despite these gains, learning representations for generative speech tasks remains an open and more complex problem that has begun to attract focused attention [17, 28, 44, 45]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.831, + 0.828, + 0.903 + ], + "angle": 0, + "content": "Among the generative speech tasks, spoken language modeling (SLM) is an intriguing direction, aiming to create models that can not only listen but also speak. Typically, building an SLM requires two stages: first, deriving speech tokenizations; second, training a language model on the speech tokens. For the speech tokens, previous approaches either apply SSL-based representations following by discretization techniques [11, 14, 21, 32] or reuse units from neural codec models like EnCodec" + }, + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.259, + 0.061, + 0.708 + ], + "angle": 270, + "content": "arXiv:2504.07053v2 [cs.CL] 22 May 2025" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.316, + 0.938 + ], + "angle": 0, + "content": "Preprint. Under review." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.09, + 0.382, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.09, + 0.822, + 0.208 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.216, + 0.829, + 0.273 + ], + "angle": 0, + "content": "Figure 1: The concept overview. Conventional methods extract speech tokens solely from speech, inducing length-mismatch problem when conducting joint speech-text modeling. 
By taking dual modalities as input, we generate speech tokenization that is aligned with text, facilitating straightforward and effective joint modeling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.825, + 0.355 + ], + "angle": 0, + "content": "and SoundStream [5, 20, 41, 49]. Although autoregressive modeling with these speech tokens shows great potential in text-to-speech (TTS) [45, 47], previous SLMs that model only speech tokens [21, 32] have been shown to lack semantic fidelity [22]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.36, + 0.825, + 0.472 + ], + "angle": 0, + "content": "To bridge this gap, one promising direction is to utilize text—which is rich in semantic—during spoken language modeling. TWIST [11] shows that SLMs can benefit from initializing with text LLMs. More recent work often conducts joint speech-text modeling on tokens of both modalities to facilitate the semantic coherence on the generated speech [6, 9, 33, 46]. Yet integrating text and speech tokens introduces a length-mismatch challenge, as speech token sequences are usually longer than their text counterparts. Common remedies may include interleaving speech and text tokens [33] or inserting padding to synchronize sequence lengths between modalities [6, 9, 46]. However, these methods require either additional speech-text alignment or heuristic rules to enable joint modeling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.478, + 0.827, + 0.618 + ], + "angle": 0, + "content": "In this work, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a special type of speech tokenization tailored for speech-text joint spoken language modeling. By acknowledging that the length mismatch introduces additional complexity in joint modeling, we develop our speech token to be aligned with its corresponding text transcription tokens. 
To achieve this, we first obtain the textual transcription of a speech with the ASR model; then we derive the speech token based on the transcription through a specialized cross-attention mechanism for speech reconstruction. Note that the full process can be accomplished in an end-to-end manner, with no explicit speech-text alignment required. Unlike previous speech tokens that are developed under a fixed stride with fixed down-sampling rate, our speech token has dynamic frequency as it is text-aligned. Figure 1 shows an overall concept of TASTE, illustrating how our tokenization allows straightforward joint modeling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.825, + 0.734 + ], + "angle": 0, + "content": "To evaluate the effectiveness of TASTE, we first conduct extensive experiments on speech reconstruction. Our results on LibriSpeech [34] show that TASTE not only resynthesizes speech in high quality, but also retains similarity to the original speech. TASTE achieves high-end reconstruction at an extremely low bit rate (\\(\\sim 150\\) bps); while the other comparable methods are often more than thousands of bps. More intriguingly, we demonstrate that TASTE allows simple text-aligned speech editing. By exchanging the partial text-aligned speech tokens from two different utterances with the same content, we demonstrate that the paralinguistic information such as duration and tone can be exchanged precisely following the words being exchanged, resulting in natural edited speech." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.739, + 0.827, + 0.878 + ], + "angle": 0, + "content": "On the other hand, we demonstrate that TASTE successfully allows effective spoken language modeling. We perform straightforward joint modeling with TASTE under Low-Rank Adaptation [15]. We first perform speech continuation experiments with 3-second speech prompts given. The evaluation is three-fold. 
We use GPT-4o for evaluating the semantic aspect; UTMOS [38] for the acoustic aspect; and the human listening test for the general evaluation. Results show that our SLMs not only generate natural, meaningful speech continuations, but also outperforms the other 7B pre-trained SLMs across all the continuation evaluation aspects with 1.3B parameters. We also evaluate our SLMs on two benchmarks, SALMON [25] and StoryCloze [11] and our results show that our SLMs achieve comparable performance compared to the other speech-text joint modeling methods. Moreover, we show that our pretrained SLM can perform spoken question answering under few-shot scenario." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.827, + 0.913 + ], + "angle": 0, + "content": "In summary, we derive TASTE, a text-aligned speech tokenization that allows effective joint speech-text spoken language modeling. By aligning the speech tokenization with its text counterpart during" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.178, + 0.096, + 0.817, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.283, + 0.825, + 0.34 + ], + "angle": 0, + "content": "Figure 2: The overall framework of our text-aligned speech tokenization and embedding. The left side illustrate the process of obtaining the TASTE tokenization \\(\\hat{z}\\), detailed in Section 2.1.1; while the right side demonstrate how we reconstruct the speech with TASTE (Section 2.1.2). The training objective for our speech reconstruction is discussed in Section 2.1.3." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.367, + 0.825, + 0.424 + ], + "angle": 0, + "content": "the tokenization stage, TASTE enables straightforward modeling. 
To our best knowledge, we are the first one to utilize the reconstruction objective to automatically derive a text-aligned speech tokenization and embedding that is suitable for joint speech-text spoken language modeling. Our demo is available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.442, + 0.273, + 0.457 + ], + "angle": 0, + "content": "2 Method" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.473, + 0.828, + 0.517 + ], + "angle": 0, + "content": "We propose text-aligned speech tokenization and embedding (TASTE) to facilitate effective joint speech-text spoken language modeling. Here, we first introduce how we derive our tokenization—TASTE—in Section 2.1, and then discuss how we use TASTE for spoken language modeling (§ 2.2)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.532, + 0.33, + 0.546 + ], + "angle": 0, + "content": "2.1 Building TASTE" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.557, + 0.825, + 0.614 + ], + "angle": 0, + "content": "As depicted in Figure 2, TASTE is comprised of the two main components: the text-aligned speech tokenizer (\\(\\S\\) 2.1.1) that produces the text-aligned speech tokenization; and the speech decoder (\\(\\S\\) 2.1.2) to reconstruct speech based on the text token and the TASTE speech token aligned with it. The training objective of speech reconstruction is described in Section 2.1.3." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.628, + 0.405, + 0.643 + ], + "angle": 0, + "content": "2.1.1 TASTE Speech Tokenizer" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.825, + 0.75 + ], + "angle": 0, + "content": "In TASTE, the speech tokenizer, denoted as Tokenizer(\\cdot), is designed to generate the text-aligned speech tokenization and embedding with the speech-text pair \\( X = (u, v) \\) taken as input, where \\( v \\) represents the textual transcription of the speech utterance \\( u \\), which can be easily obtained through an automatic speech recognition (ASR) system. Recent developments in robust and efficient ASR ([10, 35]) allow us to focus on discussing how to derive the text-aligned speech token effectively by assuming that \\( v \\) is of sufficient quality. The TASTE speech tokenizer is composed of three major components: an encoder, an aggregator, and a quantizer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.755, + 0.826, + 0.843 + ], + "angle": 0, + "content": "The encoder Encoder \\((\\cdot)\\) contains \\(L\\) layers of Transformer ([43]) encoder blocks and is used to extract high-dimensional speech representation. We employ the pre-trained Whisper ASR encoder [35] as our speech encoder, and it is frozen during training. For an input speech utterance \\(\\pmb{u}\\), the encoder produces a sequence of hidden states from each layer \\([h^{(1)}, h^{(2)}, \\dots, h^{(L)}]\\). 
In our experiments, we retain the last hidden layer representation \\(h^{(L)}\\) and the shallow representation \\(h^{(l)}\\) from the first half of the hidden representations of the encoder for later usage, denoted as:" + }, + { + "type": "equation", + "bbox": [ + 0.328, + 0.859, + 0.669, + 0.88 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)} = \\operatorname {E n c o d e r} (\\boldsymbol {u}), \\quad \\text {w h e r e} 1 \\leq l \\leq \\left\\lfloor \\frac {L}{2} \\right\\rfloor .\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "Note that both of the hidden representations \\( \\pmb{h}^{(L)}, \\pmb{h}^{(l)} \\in \\mathbb{R}^{T \\times d_h} \\) have their length denoted as \\( T \\) and the hidden dimension indicated by \\( d_h \\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.149 + ], + "angle": 0, + "content": "The hidden representations extracted from the encoder are then passed to the aggregator. The aggregator is designed to obtain a more compressed speech representation \\( \\mathbf{z} \\) that is aligned in length with the text transcription \\( \\mathbf{v} \\). 
Consider that \\( \\mathbf{v} = [v_{1}, v_{2}, \\dots, v_{N}] \\), \\( v_{i} \\in \\mathbb{V} \\) is a text token sequence with length \\( N \\), the input and output of the aggregator can be denoted as:" + }, + { + "type": "equation", + "bbox": [ + 0.209, + 0.152, + 0.787, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {z} = \\operatorname {A g g r e g a t o r} (\\boldsymbol {v}, \\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)}), \\text {w h e r e} \\boldsymbol {z} \\in \\mathbb {R} ^ {N \\times d _ {z}}, \\boldsymbol {v} \\in \\mathbb {V} ^ {N}, \\text {a n d} \\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)} \\in \\mathbb {R} ^ {T \\times d _ {h}}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.175, + 0.825, + 0.218 + ], + "angle": 0, + "content": "To make the speech representation \\( z \\) text-aligned, we conduct a simple yet effective attention mechanism based on the three inputs. Consider that the original multi-head attention in [43] is denoted as MultiHead(\\( Q, K, V \\)), our first layer attention in the aggregator takes:" + }, + { + "type": "equation", + "bbox": [ + 0.18, + 0.223, + 0.814, + 0.24 + ], + "angle": 0, + "content": "\\[\nQ = \\text {t e x t} v, \\quad K = \\text {e n c o d e r} h ^ {(L)}, \\quad V = \\text {e n c o d e r} h ^ {(l)}.\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.244, + 0.827, + 0.383 + ], + "angle": 0, + "content": "By doing so, the length of our first multi-head attention output should follow the text transcription \\( v \\). Note that the query of the following layers becomes the output from the previous layer. In addition, intuitions of using the encoder's last hidden representation as keys, and the shallow hidden representation as values can be described as follows: 1) In Transformer-based ASR models, the last hidden states often encode rich speech-text alignment cues; sometimes the cross-attention weight matrices can even be exploited as soft word-alignment maps [10, 35]. 
2) The shallow representation has been shown to support high-quality speech reconstruction even when the quantization is applied [7, 8]. Based on the above observations, we design our aggregator that can use the soft attention maps obtained from last encoder representations and the text transcriptions, to aggregate the shallow encoder representations that is beneficial for high-end speech reconstruction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.389, + 0.825, + 0.446 + ], + "angle": 0, + "content": "After getting the text-aligned representation, the quantizer \\(\\mathrm{Quantizer}(\\cdot)\\) is adopted to discretize the text-aligned representation. We use the residual vector quantization (RVQ) to allow coarse-to-fine quantization. Given the text-aligned speech representation \\(z\\) and the quantizer containing \\(R\\) residual vector quantization layers, we generate:" + }, + { + "type": "equation", + "bbox": [ + 0.264, + 0.45, + 0.826, + 0.492 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {q}, \\hat {\\boldsymbol {z}} = \\operatorname {Q u a n t i z e r} (\\boldsymbol {z}), \\quad \\boldsymbol {q} = \\left[ \\boldsymbol {q} ^ {(1)}, \\boldsymbol {q} ^ {(2)}, \\dots , \\boldsymbol {q} ^ {(R)} \\right], \\quad \\hat {\\boldsymbol {z}} = \\sum_ {r = 1} ^ {R} \\hat {\\boldsymbol {z}} ^ {(r)} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.497, + 0.825, + 0.541 + ], + "angle": 0, + "content": "where each \\( \\pmb{q}^{(r)} \\in \\mathbb{C}^N \\) denotes the \\( r \\)-th layer code sequence with code set \\( \\mathbb{C} \\); and the quantized embedding \\( \\hat{\\pmb{z}} \\) to be the summation over each layer of the codebook vectors. Note that both of the code sequence and the quantized speech embedding \\( \\hat{\\pmb{z}} \\) are text-aligned, with the lengths to be \\( N \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.554, + 0.394, + 0.569 + ], + "angle": 0, + "content": "2.1.2 TASTE Speech Decoder" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.578, + 0.826, + 0.634 + ], + "angle": 0, + "content": "The speech decoder aims to perform speech reconstruction conditioned on the text token sequence and the text-aligned speech tokenization. As shown in Figure 2, the text and speech tokens are aligned in lengths and being fed into the speech decoder after weighted sum in an autoregressive manner. The speech decoder is composed of the two components: the unit decoder and the unit-to-speech vocoder." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.64, + 0.825, + 0.669 + ], + "angle": 0, + "content": "The unit decoder UnitDecoder \\((\\cdot)\\) is a Transformer-based decoder that takes the text token sequence \\(\\mathbf{v}\\) and the aligned speech embedding \\(\\hat{\\mathbf{z}}\\) as input and predicts the speech unit \\(\\mathbf{y}\\) for reconstruction:" + }, + { + "type": "equation", + "bbox": [ + 0.411, + 0.673, + 0.825, + 0.689 + ], + "angle": 0, + "content": "\\[\n\\boldsymbol {y} = \\operatorname {U n i t D e c o d e r} (\\hat {\\boldsymbol {z}}, \\boldsymbol {v}). \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.693, + 0.825, + 0.737 + ], + "angle": 0, + "content": "Note that the additional speaker embedding is also taken as input to facilitate global speaker voice control in our spoken language models [16]. After we generating the speech unit \\( \\mathbf{y} \\), we use a unit-to-speech vocoder to further transform the unit into the reconstructed speech." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.749, + 0.359, + 0.765 + ], + "angle": 0, + "content": "2.1.3 Training Objective" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.826, + 0.857 + ], + "angle": 0, + "content": "Similar to other reconstruction-based speech tokens [24, 51], we derive TASTE by training it for speech resynthesis. To achieve this, we extract the speech unit \\( \\pmb{y}^{\\mathrm{target}} \\) with length \\( T' \\) from the original speech \\( u \\) as the target unit for our speech tokenizer and speech decoder. Given the text transcription \\( v \\), the TASTE speech embedding \\( \\hat{z} \\), and the unit from the original speech \\( \\pmb{y}^{\\mathrm{target}} \\) as the target, the speech reconstruction through the tokenizer and the unit decoder parametrized by \\( \\theta \\) under the next prediction schema can be considered as minimizing the cross-entropy loss below:" + }, + { + "type": "equation", + "bbox": [ + 0.346, + 0.874, + 0.826, + 0.915 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {c e}} (\\theta) = \\frac {1}{| T ^ {\\prime} |} \\sum_ {t = 1} ^ {T ^ {\\prime}} - \\log p _ {\\theta} \\left(y _ {t} ^ {\\text {t a r g e t}} \\mid \\hat {\\boldsymbol {z}}, \\boldsymbol {v}; \\boldsymbol {y} _ {< t} ^ {\\text {t a r g e t}}\\right) \\tag {3}\n\\]" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.138 + ], + "angle": 0, + "content": "On the other hand, we employ the quantization loss as well to tokenize the continuous representation \\( z \\) extracted from the encoder-aggregator. 
Following prior works [5, 49], given that \\( z^{(r)} \\) is the \\( r \\)-th residual and \\( \\hat{z}^{(r)} \\) indicates the \\( r \\)-th quantized residual, the commitment loss is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.4, + 0.144, + 0.826, + 0.186 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {r v q}} (\\theta) = \\sum_ {r = 1} ^ {R} \\| z ^ {(r)} - \\hat {z} ^ {(r)} \\|. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.198, + 0.731, + 0.213 + ], + "angle": 0, + "content": "By summation over both losses, we formulate the overall loss for training TASTE as:" + }, + { + "type": "equation", + "bbox": [ + 0.432, + 0.22, + 0.826, + 0.236 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\text {t a s t e}} = \\mathcal {L} _ {\\mathrm {c e}} + \\mathcal {L} _ {\\mathrm {r v q}}. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.242, + 0.825, + 0.271 + ], + "angle": 0, + "content": "Note that to allow gradient to back-propagate from the unit decoder through the tokenizer, the straight-through estimation technique is applied towards the quantization process during training." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.287, + 0.49, + 0.303 + ], + "angle": 0, + "content": "2.2 TASTE for Spoken Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.827, + 0.424 + ], + "angle": 0, + "content": "Next, we describe how we conduct effective spoken language modeling with TASTE. Following previous work [11, 33], we perform pre-training on speech data. The text transcription of the speech data is also used for joint speech-text pre-training of our text-aligned spoken language model (TASLM). Since TASTE tokenization already aligns with the text token sequence, we can conduct a straightforward joint modeling, as illustrated in Figure 1. To demonstrate the robustness of TASTE, we perform two types of text-aligned spoken language modeling. 
First, we build \\(\\mathrm{TASLM}_{\\mathrm{token}}\\) over our text-aligned speech token \\(q\\), discussed in Section 2.2.1. Then, we show how we build \\(\\mathrm{TASLM}_{\\mathrm{emb}}\\) with our text-aligned speech embedding \\(\\hat{z}\\), detailed in Section 2.2.2." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.437, + 0.395, + 0.452 + ], + "angle": 0, + "content": "2.2.1 Modeling TASTE Token" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.46, + 0.825, + 0.545 + ], + "angle": 0, + "content": "As our speech tokens derived from the RVQ quantizer contain \\( R \\) layers of codes, we employ \\( R \\) linear heads for multi-head prediction in our \\( \\mathrm{TASLM}_{\\mathrm{token}} \\). Namely, the \\( \\mathrm{TASLM}_{\\mathrm{token}} \\) simultaneously predicts the next text token and the corresponding \\( R \\) layers of speech tokens in each step. The overall training objective follows the original next token prediction scheme, but with multiple predictions across modalities at each step. Specifically, given the text transcription \\( v \\) and \\( R \\) layers of quantized RVQ codes \\( q \\), the multi-head next-token prediction training objective can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.238, + 0.552, + 0.826, + 0.593 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {t o k e n}} (\\phi) = \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} \\left(- \\log p _ {\\phi} ^ {\\text {t e x t}} \\left(v _ {i} \\mid \\boldsymbol {v} _ {< i}, \\boldsymbol {q} _ {< i}\\right) + \\sum_ {r = 1} ^ {R} - \\log p _ {\\phi} ^ {(\\mathrm {r})} \\left(q _ {i} ^ {(\\mathrm {r})} \\mid \\boldsymbol {v} _ {< i}, \\boldsymbol {q} _ {< i}\\right)\\right), \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.825, + 0.644 + ], + "angle": 0, + "content": "with \\(\\phi\\) represents the parameter of the TASLM token, and \\(p^{(r)}\\) is the \\(r\\)-th probability prediction for the \\(r\\)-th RVQ code. 
As for inference, we directly sample the codes and the text simultaneously, and transform the codes into the corresponding embedding for the speech decoder to generate speech." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.658, + 0.434, + 0.673 + ], + "angle": 0, + "content": "2.2.2 Modeling TASTE Embedding" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.682, + 0.825, + 0.766 + ], + "angle": 0, + "content": "Besides the token code sets, recent progress on latent modeling [17, 28] motivates us to conduct experiments on modeling our text-aligned speech embedding. Referencing MELLE [28], we employ a linear layer that predicts the mean vector \\(\\mu_{i}\\) and a log-magnitude variance vector \\(\\log \\sigma_i^2\\), where \\(i\\) indicates the \\(i\\)-th frame of the sequence. And the final predicted latent of frame \\(i\\) is denoted as \\(e_i = \\mu_i + \\sigma_i\\odot \\epsilon\\), where \\(\\epsilon \\sim \\mathcal{N}(0,I)\\). Following MELLE, the straight-through estimator is applied to allow gradients to back-propagate properly during training." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.771, + 0.827, + 0.8 + ], + "angle": 0, + "content": "To facilitate latent prediction, we apply the regularization loss and the Kullback-Leibler (KL) divergence loss driving training, which is described as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.212, + 0.807, + 0.826, + 0.85 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {r e g}} (\\psi) = \\left\\| \\boldsymbol {e} _ {\\psi} - \\hat {\\boldsymbol {z}} \\right\\| _ {2} ^ {2}, \\quad \\mathcal {L} _ {\\mathrm {K L}} = \\frac {1}{2} \\sum_ {i = 1} ^ {N} \\sum_ {j = 1} ^ {d _ {z}} \\left(\\sigma_ {i} [ j ] + \\left(\\mu_ {i} [ j ] - \\hat {z} _ {i} [ j ]\\right) ^ {2}\\right) - 1 - \\log \\sigma_ {i} ^ {2} [ j ]), \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "where \\(\\psi\\) indicates the parameter of TASLMemb, and \\(d_{z}\\) is the dimension of our text-aligned embedding \\(\\hat{z}\\). The regularization loss \\(\\mathcal{L}_{\\mathrm{reg}}\\) is adopted to predict close latent towards the target embedding \\(\\hat{z}\\). The KL divergence loss calculates the KL divergence between the predicted latent distribution and the target distribution. Following MELLE, we select the target distribution to be \\(\\mathcal{N}(\\hat{z}_i,I)\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.123 + ], + "angle": 0, + "content": "This allows simplification of \\(\\mathcal{L}_{\\mathrm{KL}}\\), which can then be approximated with the predicted vectors \\(\\mu_i, \\sigma_i\\), and the target embedding \\(\\hat{z}_i\\). 
Finally, the overall loss along with the text loss is described as:" + }, + { + "type": "equation", + "bbox": [ + 0.266, + 0.129, + 0.826, + 0.171 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {e m b}} (\\psi) = \\lambda_ {\\mathrm {r e g}} \\cdot \\mathcal {L} _ {\\mathrm {r e g}} + \\lambda_ {\\mathrm {K L}} \\cdot \\mathcal {L} _ {\\mathrm {K L}} + \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} - \\log p _ {\\psi} ^ {\\text {t e x t}} \\left(v _ {i} \\mid \\boldsymbol {v} _ {< i}, \\hat {\\boldsymbol {z}} _ {< i}\\right), \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.177, + 0.684, + 0.195 + ], + "angle": 0, + "content": "where \\(\\lambda_{\\mathrm{reg}}\\), \\(\\lambda_{\\mathrm{KL}}\\) to be the weighted coefficients of the two losses, respectively." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.211, + 0.36, + 0.23 + ], + "angle": 0, + "content": "3 Experiment Setup" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.243, + 0.363, + 0.259 + ], + "angle": 0, + "content": "3.1 Model Configuration" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.268, + 0.828, + 0.477 + ], + "angle": 0, + "content": "For our TASTE speech tokenizer, we initialize our encoder from Whisper [35]. Specifically, we use whisper-large-v3 for our initialization. By doing so, we can reduce computational cost between obtaining the ASR transcription and extracting the TASTE tokenization with the TASTE encoder frozen during training. On the other hand, we use the S3 token from CosyVoice [7] as the target unit for speech reconstruction. Since their speech tokenization facilitates additional speaker embedding, we follow the same procedure to obtain one. Adding speaker embedding allows global speaker voice control, which is a reasonable and useful scenario for spoken language models. The unit-to-speech vocoder is comprised of a flow model [23, 27] and a HifiGAN. We use the published pre-trained ones from [7], and they are not involved in our training. 
For the quantizer, we set the RVQ layer \\( R = 4 \\), the codebook size 512, and the codebook dimension to be 256. For the spoken language modeling, we follow previous work [6, 11, 22, 33] and initialize our spoken language model from a text LLM. However, this introduces the vocabulary mismatch problem between the ASR and LLM. We resolve this issue by using word-level TASTE tokenization and embedding, which is detailed in Appendix A.2. Moreover, we conduct Low-Rank Adaptation (LoRA) for parameter-efficient fine-tuning of our TASLMs. We set the corresponding hyperparameters rank \\( r = 64 \\) and \\( \\alpha = 128 \\)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.492, + 0.268, + 0.506 + ], + "angle": 0, + "content": "3.2 Dataset" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.518, + 0.828, + 0.603 + ], + "angle": 0, + "content": "We use two datasets—Emilia and LibriTTS—as our training datasets. Emilia [12] is an in-the-wild dataset where the speech is web-scaled and the transcriptions are pseudo-labeled. We use only the English subset of this multi-lingual corpus, which is about 40,000 hours. LibriTTS [50] is a reading-style corpus based on LibriSpeech [34]. We use all the training splits in LibriTTS for training, which is approximately 600 hours of speech. In addition, the test-clean split in LibriSpeech is used for evaluation purposes for our TASTE tokenizer and TASLMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.621, + 0.262, + 0.637 + ], + "angle": 0, + "content": "4 Result" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.652, + 0.828, + 0.696 + ], + "angle": 0, + "content": "We separate the evaluation into two phases: Section 4.1 shows the results regarding our TASTE tokenization; while Section 4.2 evaluates our TASLM across multiple aspects, including acoustic, semantic, and continuation. For clarity, the metrics are introduced within each section." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.711, + 0.434, + 0.725 + ], + "angle": 0, + "content": "4.1 Results of TASTE Tokenization" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.736, + 0.466, + 0.752 + ], + "angle": 0, + "content": "4.1.1 Speech Reconstruction Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.761, + 0.825, + 0.803 + ], + "angle": 0, + "content": "We first represent the speech reconstruction evaluation results. For comprehensive evaluation, we use different metrics, including the reference-free metrics for quality assessment, and the reference-based metrics for evaluating the similarity between the reconstructed and the original speech." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.817, + 0.826, + 0.889 + ], + "angle": 0, + "content": "Quality Assessment We use ASR-WER, UTMOS [38], and DNS-MOS [37] as our metrics for evaluating the speech quality. For ASR-WER, we use HuBERT-Large [14] as the ASR model to transcribe the speech, and then calculate the word-error rate (WER) on the transcription. \\( {}^{1} \\) UTMOS and DNS-MOS are both neural-based MOS predictors. While both evaluate the speech quality, the design purpose of DNS-MOS makes it more suitable for evaluation regarding the noise levels." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.898, + 0.612, + 0.913 + ], + "angle": 0, + "content": "" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.098, + 0.825, + 0.139 + ], + "angle": 0, + "content": "Table 1: The speech tokenization evaluation results on the test-clean split of LibriTTS. The evaluation is separated into the QUALITY and the SIMILARITY assessments, as introduced in Section 4.1.1. We use gray text to indicate the worst-performing methods in each metric." 
+ }, + { + "type": "table", + "bbox": [ + 0.174, + 0.14, + 0.825, + 0.319 + ], + "angle": 0, + "content": "
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%4.093.84---76.6
Encodec [5]15005.1%1.583.263.460.940.63-
30002.6%2.353.483.810.960.7825.6
SpeechTokenizer [51]5005.2%1.272.992.800.940.35-
20003.0%3.563.603.650.970.8053.9
40002.5%3.903.764.030.980.92-
Mimi [6]10003.1%3.603.603.620.960.8267.6
S3 token (topline) [7]6003.0%4.183.903.300.960.8270.2
Text-only (baseline)~505.9%4.314.112.440.570.7842.6
TASTE (ours)~1504.4%4.294.103.050.910.8068.3
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.346, + 0.827, + 0.457 + ], + "angle": 0, + "content": "Similarity Assessment For similarity, we measure ViSQOL, duration consistency (Drtn. Con.), speaker similarity (Spkr. Sim.) and the MUSHRA test as human evaluation. We use ViSQOL [4] is a production-ready tool that predicts speech quality via spectro-temporal image similarity comparisons. For the duration consistency, we first get the word-level alignment of the transcriptions of the original and the reconstructed speech using Montreal Forced Aligner [26]; then we calculate if the duration between each of the same words is matched under a preset tolerance window, which is set to 50 milliseconds. For MUSHRA human listening test, we reference the original protocol [40] to instruct evaluators to rate the similarity and quality on a scale of 1 to 100 with reference given." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.472, + 0.827, + 0.693 + ], + "angle": 0, + "content": "Speech Reconstruction Results The evaluation results of our speech reconstruction on LibriSpeech are shown in Table 1. We highlight that our TASTE speech tokenization has the lowest bitrate among all the other speech tokenization methods. Note that since the speech tokenization is dynamic in frequency, we estimate our bitrate based on calculating the overall token count and the duration over the testing set. Despite the low bitrate, we generally attain much better performance comparing with the worst performing methods (gray text in the table) across each metric. Moreover, on the quality assessment, our MOS prediction scores are the second highest and even surpasses the ground truth, showcasing the reconstructed speech is of high quality. Next, we focus on the results of the similarity assessment. As for the duration consistency, we score the second-worst performance comparing with the other methods. 
We attribute this to the fact that our tokenization compresses the sequence in a very dynamic way. Despite that, we still outperform the text-only method with a large margin, perform close towards other speech tokenization methods which all have a fixed down-sampling rate. Lastly, our method attains the second-highest MUSHRA score (excluding the ground-truth anchor). This highlights TASTE's effectiveness: even without reproducing every microscopic detail, it still yields perceptually high-quality speech in human listening tests. Overall, TASTE carries rich paralinguistic information, facilitating high-end speech reconstruction under an extremely low bitrate." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.708, + 0.506, + 0.723 + ], + "angle": 0, + "content": "4.1.2 TASTE for Text-Aligned Speech Editing" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.732, + 0.827, + 0.913 + ], + "angle": 0, + "content": "After comprehensive speech-reconstruction experiments, we show that TASTE can also perform text-aligned speech editing. Suppose we have two utterances with the same transcript but different paralinguistic characteristics. By exchanging their TASTE token sequences word by word, we ask whether the associated paralinguistic traits are transferred as well. To make the effect easy to see, we choose utterances that differ mainly in speaking rate and focus on duration changes. The overall text-aligned editing procedure is described as follows: 1) Extract the TASTE tokens \\(\\hat{z}^{\\mathrm{orig}}\\) for each source utterance. 2) Swap the tokens at the desired text positions, resulting in edited TASTE tokens \\(\\hat{z}^{\\mathrm{edit}}\\). 3) Decode the edited token sequence \\(\\hat{z}^{\\mathrm{edit}}\\) back to speech. In Figure 3, we present the alignments obtained from MFA [26] of the original speech and the speech after editing, and compare them horizontally. 
As shown in the figure, words whose tokens were swapped exhibit clear duration shifts, while the untouched words keep their original timing—evidence that TASTE enables precise, text-aligned manipulation. Additional examples that target other paralinguistic dimensions are provided on our demo page." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.089, + 0.428, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.432, + 0.089, + 0.824, + 0.237 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.244, + 0.828, + 0.302 + ], + "angle": 0, + "content": "Figure 3: An illustration of TASTE for text-aligned speech editing. On the left shows the process of our text-aligned speech editing. We first extract the TASTE tokens; swap the tokens partially; and then decode the edited TASTE tokens into edited speech. On the right shows an example visualization. Only the durations of the words with exchanged TASTE tokens show significant difference." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.327, + 0.584, + 0.344 + ], + "angle": 0, + "content": "4.2 Evaluating Text-Aligned Spoken Language Modeling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.353, + 0.828, + 0.41 + ], + "angle": 0, + "content": "To provide a comprehensive evaluation of our text-aligned spoken language modeling (TASLM), we first compare our pre-trained SLM with other methods through speech continuation and likelihood-based benchmarks in Section 4.2.1. Then, to further investigate the understanding capabilities, we conduct evaluation with spoken question answering in Section 4.2.2." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.424, + 0.528, + 0.439 + ], + "angle": 0, + "content": "4.2.1 Comparing TASLM with Pretrained SLMs" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.447, + 0.828, + 0.56 + ], + "angle": 0, + "content": "Speech Continuation Evaluation A typical way to evaluate the pre-trained SLM is by performing conditional generation. Following previous work [11, 22], we use the 3-second prompt speech from the LibriSpeech test-clean. To evaluate the quality of the continuations generated by the SLMs, we employ GPT-4o to assign MOS scores regarding the transcribed speech continuation using ASR, focusing on the semantic coherence of the continuation. In addition, we compute UTMOS as for evaluating the speech quality and naturalness. Last but not least, we conduct human listening test, in which each evaluator is asked to give a MOS score regarding the overall performance of the generated speech continuation. The details of the instructions for GPT-4o and human are in Appendix." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.574, + 0.828, + 0.618 + ], + "angle": 0, + "content": "Likelihood-Based Evaluation Following previous work [11, 22, 33], we also evaluate our SLMs through likelihood-based benchmarks, where the accuracy score is based on whether the model chooses the correct continuation from the two given speech utterances based on its output likelihoods." + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.648, + 0.826, + 0.717 + ], + "angle": 0, + "content": "Table 2: Pretrained SLM speech continuation and likelihood-based next-speech selection results. The superscripts at the bottom of the table indicate the base models used by each SLM, indicated by superscripts. Cascade models refer to the pipeline with ASR (whisper-large-v3), text continuation by LMs, and TTS (CosyVoice). This comparison evaluates SLMs and cascade models in continuation evaluation. 
As shown in the table, TASLM tends to preserve the semantic capabilities of LMs." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.718, + 0.825, + 0.896 + ], + "angle": 0, + "content": "
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.154.254.00---
Cascade (LLaMA2-7Bβ)-3.434.253.98---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.483.251.9562.561.562.0
TWIST 7B [11]7B / 7Bγ1.443.272.0463.464.764.1
Spirit LM [33]7B / 7Bβ2.793.412.3859.172.065.6
Spirit LM Expr. [33]7B / 7Bβ1.903.402.4169.066.267.6
Baseline (S3 token)45M / 1.3Bα1.374.042.8450.258.754.5
TASLM 1B (token)45M / 1.3Bα3.084.073.9360.876.568.7
TASLM 1B (embed.)45M / 1.3Bα3.164.224.1657.776.767.2
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.179, + 0.897, + 0.51, + 0.907 + ], + "angle": 0, + "content": "Base models: \\(\\alpha\\) LLaMA3.2-1B, \\(\\beta\\) LLaMA2-7B, \\(\\gamma\\) LLaMA-7B, \\(\\theta\\) OPT-1.3B" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.162 + ], + "angle": 0, + "content": "We adopt two established benchmarks SALMON [25] and spoken StoryCloze [11, 29], which covers the acoustic aspect and the semantic aspect, respectively. Since both benchmarks contain multiple tasks, we report the average accuracy across these tasks within each benchmark for simplicity. The detailed results are in Appendix A.5.1 for the interested readers. We also report the mean of the SALMON and StoryCloze as an overall assessment for both aspects." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.176, + 0.827, + 0.343 + ], + "angle": 0, + "content": "Results The results of TASLM comparing to other pre-trained SLM are in Table 2. First, we highlight that our TASLMs have achieved significantly higher scores on speech continuation across human and machine evaluations; and good performance on the likelihood-based benchmarks. Note that our base language model contains only 1.3 billion parameters, showing the effectiveness of using TASTE for joint modeling. Compared to the cascade method that has the same base model (first row), our \\(\\mathrm{TASLM}_{\\mathrm{emb}}\\) scores comparable on GPT-4o but better on human MOS. This indicates that our generated speech is more natural than the cascade one that utilizes TTS for synthesis. Next, our TASLM is the only SLM that not only maintains but even surpasses the performance of its corresponding text-base model. Moreover, we demonstrate that directly using the S3 token for joint modeling following [46] does not yield performance comparable in any aspect. 
The result further strengthens the intuition behind TASTE (mitigating the length-mismatch during tokenization stage facilitates effective joint spoken language modeling)." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.357, + 0.51, + 0.373 + ], + "angle": 0, + "content": "4.2.2 TASLM for Spoken Question Answering" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.38, + 0.468, + 0.601 + ], + "angle": 0, + "content": "Following [6], we conduct evaluation on spoken question answering to investigate the understanding ability of our TASLM. For this experiment, we use the \\(\\mathrm{TASLM}_{\\mathrm{emb}}\\) for simplicity. We compare our pre-trained only SLM with other instruction-finetuned joint SLMs such as Mini-Omni [46], Moshi [6], and Llama-Omni [9]. We use two spoken question answering benchmarks, Web Questions [2] and LLaMA-Questions [30], following [30]. We report the accuracy of answer containment. For fairness, we report not only the performance of the speech-text joint SLMs, but also the base text LLM they used if applicable. Our results indicate that our TASLM is the only method that does not" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.601, + 0.825, + 0.63 + ], + "angle": 0, + "content": "degrade the corresponding text base LLM. We attribute the phenomenon to the effectiveness of our TASTE tokenization for joint speech-text modeling." + }, + { + "type": "table_caption", + "bbox": [ + 0.476, + 0.397, + 0.827, + 0.439 + ], + "angle": 0, + "content": "Table 3: Evaluation of spoken question answering. Performance across modalities is compared row-wise, where T denotes text and A denotes audio." + }, + { + "type": "table", + "bbox": [ + 0.48, + 0.446, + 0.825, + 0.579 + ], + "angle": 0, + "content": "
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.339.0
Mini-Omni 0.5B [46]T+A4.511.6
Helium 7B (text)T32.375.0
Moshi 7B [6]T+A26.662.3
LLaMA3.1-8B-InstructT60.471.7
Llama-Omni-8B [9]T+A35.567.3
LLaMA3.2-1B†T24.051.0
TASLM 1B (embed.)†T+A27.157.6
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.487, + 0.579, + 0.77, + 0.591 + ], + "angle": 0, + "content": "We apply few-shot learning to facilitate question answering." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.648, + 0.303, + 0.664 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.68, + 0.827, + 0.806 + ], + "angle": 0, + "content": "In this work, we propose Text-Aligned Speech Tokenization and Embedding (TASTE), to facilitate joint speech-text spoken language modeling. By aggregating proper encoder representation through the specialized cross-attention mechanism and taking the ASR model as initialization, we make the speech tokenization text-aligned in an end-to-end manner with no explicit word alignment required. We conduct extensive evaluation on our TASTE tokenizer. Our results show that TASTE allows high quality speech reconstruction at an extremely low bitrate. With our text-aligned speech tokenization and embedding, joint speech-text modeling becomes straightforward and effective. Our experimental results indicate that TASTE enables turning a text LLM into a spoken one with the simple parameter-efficient finetuning technique applied." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.819, + 0.829, + 0.906 + ], + "angle": 0, + "content": "**Limitation** Several limitations of our current work point to promising avenues for future research. First, neither our TASTE tokenization nor the text-aligned SLM has been optimized for time efficiency; developing a low-latency, streaming variant remains future work. Second, we have evaluated TASTE only on English data—its portability to other languages deserves thorough investigation. Third, although our pretrained SLM generates high-quality continuations, it does not yet support robust turn-taking or instruction-following behavior, both of which are essential for truly interactive systems." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.107 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.113, + 0.826, + 0.156 + ], + "angle": 0, + "content": "[1] Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. In Advances in Neural Information Processing Systems, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.164, + 0.826, + 0.207 + ], + "angle": 0, + "content": "[2] Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. Semantic parsing on freebase from question-answer pairs. In Proceedings of the 2013 conference on empirical methods in natural language processing, 2013." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.214, + 0.827, + 0.257 + ], + "angle": 0, + "content": "[3] Li-Wei Chen and Alexander Rudnicky. Fine-grained style control in transformer-based text-to-speech synthesis. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.265, + 0.825, + 0.308 + ], + "angle": 0, + "content": "[4] Michael Chinen, Felicia SC Lim, Jan Skoglund, Nikita Gureev, Feargus O'Gorman, and Andrew Hines. Visqol v3: An open source production ready objective speech and audio metric. In 2020 twelfth international conference on quality of multimedia experience (QoMEX), 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.315, + 0.824, + 0.344 + ], + "angle": 0, + "content": "[5] Alexandre Défossez, Jade Copet, Gabriel Synnaeve, and Yossi Adi. High fidelity neural audio compression. Transactions on Machine Learning Research, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.352, + 0.826, + 0.395 + ], + "angle": 0, + "content": "[6] Alexandre Défossez, Laurent Mazaré, Manu Orsini, Amélie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.402, + 0.825, + 0.445 + ], + "angle": 0, + "content": "[7] Zhihao Du, Qian Chen, Shiliang Zhang, Kai Hu, Heng Lu, Yexin Yang, Hangrui Hu, Siqi Zheng, Yue Gu, Ziyang Ma, et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.453, + 0.825, + 0.495 + ], + "angle": 0, + "content": "[8] Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. CoRR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.503, + 0.827, + 0.533 + ], + "angle": 0, + "content": "[9] Qingkai Fang, Shoutao Guo, Yan Zhou, Zhengrui Ma, Shaolei Zhang, and Yang Feng. LlamaOmni: Seamless speech interaction with large language models. CoRR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.539, + 0.825, + 0.569 + ], + "angle": 0, + "content": "[10] Sanchit Gandhi, Patrick von Platen, and Alexander M Rush. Distil-whisper: Robust knowledge distillation via large-scale pseudo labelling. arXiv preprint arXiv:2311.00430, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.577, + 0.825, + 0.62 + ], + "angle": 0, + "content": "[11] Michael Hassid, Tal Remez, Tu Anh Nguyen, Itai Gat, Alexis Conneau, Felix Kreuk, Jade Copet, Alexandre Defossez, Gabriel Synnaeve, Emmanuel Dupoux, et al. Textually pretrained speech language models. Advances in Neural Information Processing Systems, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.626, + 0.827, + 0.683 + ], + "angle": 0, + "content": "[12] Haorui He, Zengqiang Shang, Chaoren Wang, Xuyuan Li, Yicheng Gu, Hua Hua, Liwei Liu, Chen Yang, Jiaqi Li, Peiyang Shi, et al. Emilia: An extensive, multilingual, and diverse speech dataset for large-scale speech generation. In 2024 IEEE Spoken Language Technology Workshop (SLT), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.69, + 0.825, + 0.734 + ], + "angle": 0, + "content": "[13] Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. Liger kernel: Efficient triton kernels for llm training. arXiv preprint arXiv:2410.10989, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.741, + 0.827, + 0.798 + ], + "angle": 0, + "content": "[14] Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.806, + 0.827, + 0.848 + ], + "angle": 0, + "content": "[15] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.856, + 0.827, + 0.911 + ], + "angle": 0, + "content": "[16] Zeqian Ju, Yuancheng Wang, Kai Shen, Xu Tan, Detai Xin, Dongchao Yang, Yanqing Liu, Yichong Leng, Kaitao Song, Siliang Tang, et al. Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. International Conference on Machine Learning, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.113, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.122 + ], + "angle": 0, + "content": "[17] Jaehyeon Kim, Keon Lee, Seungjun Chung, and Jaewoong Cho. Clam-tts: Improving neural codec language model for zero-shot text-to-speech. *ICLR*, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.131, + 0.826, + 0.175 + ], + "angle": 0, + "content": "[18] Taku Kudo. Subword regularization: Improving neural network translation models with multiple subword candidates. Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.185, + 0.825, + 0.229 + ], + "angle": 0, + "content": "[19] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and tokenizer for neural text processing. Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.238, + 0.827, + 0.281 + ], + "angle": 0, + "content": "[20] Rithesh Kumar, Prem Seetharaman, Alejandro Luebs, Ishaan Kumar, and Kundan Kumar. High-fidelity audio compression with improved rvqgan. Advances in Neural Information Processing Systems, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.292, + 0.827, + 0.348 + ], + "angle": 0, + "content": "[21] Kushal Lakhotia, Eugene Kharitonov, Wei-Ning Hsu, Yossi Adi, Adam Polyak, Benjamin Bolte, Tu-Anh Nguyen, Jade Copet, Alexei Baevski, Abdelrahman Mohamed, et al. On generative spoken language modeling from raw audio. Transactions of the Association for Computational Linguistics, 2021." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.359, + 0.827, + 0.403 + ], + "angle": 0, + "content": "[22] Guan-Ting Lin, Prashanth Gurunath Shivakumar, Aditya Gourav, Yile Gu, Ankur Gandhe, Hung-yi Lee, and Ivan Bulyko. Align-slm: Textless spoken language models with reinforcement learning from ai feedback. arXiv preprint arXiv:2411.01834, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.413, + 0.827, + 0.455 + ], + "angle": 0, + "content": "[23] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. The Eleventh International Conference on Learning Representations, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.466, + 0.827, + 0.523 + ], + "angle": 0, + "content": "[24] Alexander H Liu, Sang-gil Lee, Chao-Han Huck Yang, Yuan Gong, Yu-Chiang Frank Wang, James R Glass, Rafael Valle, and Bryan Catanzaro. Uniwav: Towards unified pre-training for speech representation learning and generation. The Thirteenth International Conference on Learning Representations, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.533, + 0.825, + 0.563 + ], + "angle": 0, + "content": "[25] Gallil Maimon, Amit Roth, and Yossi Adi. Salmon: A suite for acoustic language model evaluation. arXiv preprint arXiv:2409.07437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.574, + 0.827, + 0.616 + ], + "angle": 0, + "content": "[26] Michael McAuliffe, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. Montreal forced aligner: Trainable text-speech alignment using kaldi. In Interspeech 2017, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.627, + 0.827, + 0.669 + ], + "angle": 0, + "content": "[27] Shivam Mehta, Ambika Kirkland, Harm Lameris, Jonas Beskow, Éva Székely, and Gustav Eje Henter. Overflow: Putting flows on top of neural transducers for better tts. Interspeech 2023, 2022." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.681, + 0.827, + 0.723 + ], + "angle": 0, + "content": "[28] Lingwei Meng, Long Zhou, Shujie Liu, Sanyuan Chen, Bing Han, Shujie Hu, Yanqing Liu, Jinyu Li, Sheng Zhao, Xixin Wu, et al. Autoregressive speech synthesis without vector quantization. CoRR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.735, + 0.825, + 0.778 + ], + "angle": 0, + "content": "[29] Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and evaluation framework for deeper understanding of commonsense stories. Proceedings of NAACL-HLT, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.788, + 0.827, + 0.845 + ], + "angle": 0, + "content": "[30] Eliya Nachmani, Alon Levkovitch, Roy Hirsch, Julian Salazar, Chulayuth Asawaroengchai, Soroosh Mariooryad, Ehud Rivlin, RJ Skerry-Ryan, and Michelle Tadmor Ramanovich. Spoken question answering and speech continuation using spectrogram-powered llm. In The Twelfth International Conference on Learning Representations, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[31] Tu Anh Nguyen, Maureen de Seyssel, Patricia Rozé, Morgane Rivière, Evgeny Kharitonov, Alexei Baevski, Ewan Dunbar, and Emmanuel Dupoux. The zero resource speech benchmark 2021: Metrics and baselines for unsupervised spoken language modeling. NeurIPS Workshop on Self-Supervised Learning for Speech and Audio Processing, 2020." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.148 + ], + "angle": 0, + "content": "[32] Tu Anh Nguyen, Eugene Kharitonov, Jade Copet, Yossi Adi, Wei-Ning Hsu, Ali Elkahky, Paden Tomasello, Robin Algayres, Benoit Sagot, Abdelrahman Mohamed, et al. Generative spoken dialogue language modeling. Transactions of the Association for Computational Linguistics, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.158, + 0.828, + 0.217 + ], + "angle": 0, + "content": "[33] Tu Anh Nguyen, Benjamin Muller, Bokai Yu, Marta R Costa-Jussa, Maha Elbayad, Sravya Popuri, Christophe Ropers, Paul-Ambroise Duquenne, Robin Algayres, Ruslan Mavlyutov, et al. Spirit-lm: Interleaved spoken and written language model. Transactions of the Association for Computational Linguistics, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.226, + 0.825, + 0.271 + ], + "angle": 0, + "content": "[34] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: An asr corpus based on public domain audio books. In 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2015." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.279, + 0.825, + 0.324 + ], + "angle": 0, + "content": "[35] Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International conference on machine learning, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.332, + 0.825, + 0.39 + ], + "angle": 0, + "content": "[36] Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. 
Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery and data mining, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.4, + 0.825, + 0.444 + ], + "angle": 0, + "content": "[37] Chandan KA Reddy, Vishak Gopal, and Ross Cutler. Dnsmos: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.453, + 0.825, + 0.497 + ], + "angle": 0, + "content": "[38] Takaaki Saeki, Detai Xin, Wataru Nakata, Tomoki Koriyama, Shinnosuke Takamichi, and Hiroshi Saruwatari. Utmos: Utokyo-sarulab system for voicemos challenge 2022. Interspeech 2022, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.507, + 0.825, + 0.552 + ], + "angle": 0, + "content": "[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.561, + 0.827, + 0.591 + ], + "angle": 0, + "content": "[40] B Series. Method for the subjective assessment of intermediate quality level of audio systems. International Telecommunication Union Radiocommunication Assembly, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.601, + 0.825, + 0.644 + ], + "angle": 0, + "content": "[41] Hubert Siuzdak, Florian Grötschla, and Luca A Lanzendorfer. Snac: Multi-scale neural audio codec. Audio Imagination: NeurIPS 2024 Workshop AI-Driven Speech, Music, and Sound Generation, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.655, + 0.827, + 0.713 + ], + "angle": 0, + "content": "[42] Hsiang-Sheng Tsai, Heng-Jui Chang, Wen-Chin Huang, Zili Huang, Kushal Lakhotia, Shu-wen Yang, Shuyan Dong, Andy T Liu, Cheng-I Jeff Lai, Jiatong Shi, et al. Superb-sg: Enhanced speech processing universal performance benchmark for semantic and generative capabilities. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.722, + 0.827, + 0.765 + ], + "angle": 0, + "content": "[43] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.776, + 0.825, + 0.82 + ], + "angle": 0, + "content": "[44] Apoorv Vyas, Bowen Shi, Matthew Le, Andros Tjandra, Yi-Chiao Wu, Baishan Guo, Jiemin Zhang, Xinyue Zhang, Robert Adkins, William Ngan, et al. Audiobox: Unified audio generation with natural language prompts. arXiv preprint arXiv:2312.15821, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.829, + 0.825, + 0.873 + ], + "angle": 0, + "content": "[45] Chengyi Wang, Sanyuan Chen, Yu Wu, Ziqiang Zhang, Long Zhou, Shujie Liu, Zhuo Chen, Yanqing Liu, Huaming Wang, Jinyu Li, et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[46] Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.148 + ], + "angle": 0, + "content": "[47] Detai Xin, Xu Tan, Kai Shen, Zeqian Ju, Dongchao Yang, Yuancheng Wang, Shinnosuke Takamichi, Hiroshi Saruwatari, Shujie Liu, Jinyu Li, et al. Rall-e: Robust codec language modeling with chain-of-thought prompting for text-to-speech synthesis. arXiv preprint arXiv:2404.03204, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.157, + 0.825, + 0.2 + ], + "angle": 0, + "content": "[48] Shu-wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y Lin, Andy T Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, et al. Superb: Speech processing universal performance benchmark. Interspeech 2021, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.208, + 0.827, + 0.251 + ], + "angle": 0, + "content": "[49] Neil Zeghidour, Alejandro Luebs, Ahmed Omran, Jan Skoglund, and Marco Tagliasacchi. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.259, + 0.825, + 0.289 + ], + "angle": 0, + "content": "[50] Heiga Zen, Viet Dang, Rob Clark, Yu Zhang, Ron J Weiss, Ye Jia, Zhifeng Chen, and Yonghui Wu. Libritts: A corpus derived from librispeech for text-to-speech. Interspeech 2019, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.297, + 0.825, + 0.327 + ], + "angle": 0, + "content": "[51] Xin Zhang, Dong Zhang, Shimin Li, Yaqian Zhou, and Xipeng Qiu. Speechtokenizer: Unified speech tokenizer for speech large language models. ICLR, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.64, + 0.108 + ], + "angle": 0, + "content": "A Technical Appendices and Supplementary Material" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.121, + 0.317, + 0.135 + ], + "angle": 0, + "content": "A.1 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.146, + 0.827, + 0.257 + ], + "angle": 0, + "content": "Recent SLMs often require speech tokenization to conduct language modeling with the next prediction objective as the text LLMs. Unlike text, the speech signal is continuous and lengthy, making it difficult to derive proper speech tokenization for spoken language modeling. Common approaches may utilize self-supervised learned (SSL) speech models followed by quantization techniques to extract speech tokens [1, 11, 14, 21, 33]. In addition, audio or speech codec models have also been used for tokenization in recent SLMs [5, 6, 49, 51]. These models are designed for resynthesis, where the speech decoders are jointly learned with the encoders, making them easy to use for spoken language modeling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.263, + 0.828, + 0.377 + ], + "angle": 0, + "content": "With speech tokenization, GSLM [21, 32] first demonstrates the possibility of building an SLM that can generate speech. TWIST [11] further shows that SLM can benefit from initialization with the text-pretrained LLM. With regard to the huge success of text-only LLMs, recent work shifts the focus towards joint speech-text modeling [6, 11, 46]. Challenged by the modality gap between speech and text tokens, different techniques are introduced to facilitate joint modeling. 
Spirit LM [33] adopts an interleaving strategy; moshi [6] trains its own tokenizer with a reduced token frequency. Moreover, different patterns and strategies such as delayed or sequential generation are introduced for joint modeling, aiming for more reasonable and coherent speech outputs [46]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.827, + 0.465 + ], + "angle": 0, + "content": "Despite the increasing demand of joint speech-text modeling [6, 33, 46], we do not find any work discussing the effectiveness of current speech tokenization for it. Moreover, the speech token is often derived with speech or audio-only data2. Nonetheless, we observe that recent work is trying to mitigate the modality gap by reducing frequency speech token or conducting additional training stage for text-speech alignment. This motivates us to design a speech tokenization that is directly aligned with its text counterpart, tackling the mismatch issue during the tokenization stage." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.47, + 0.828, + 0.596 + ], + "angle": 0, + "content": "In the main text, we have mentioned that we utilize a specialized mechanism based on attention to extract and aggregate the encoder representations. We clarify that the text-speech cross-attention mechanism has also been used for fine-grained control of text-to-speech synthesis (TTS). More specifically, Chen and Rudnicky [3] propose content-style cross-attention to indicate their text-speech cross-attention mechanism that enables style transfer in TTS. Although both utilize specialized text-speech cross-attention mechanism, the design choices and problem formulations are completely different. We attribute of our main novelty to inventing a text-aligned speech tokenization and embedding for joint spoken language modeling, and the text-speech cross attention mechanism is considered and shown to be a clean, effective, and straightforward way of achieving it." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.897, + 0.733, + 0.912 + ], + "angle": 0, + "content": "2An exception is CosyVoice [7]. We discuss it in Section 2 since it is related to our method." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.461, + 0.107 + ], + "angle": 0, + "content": "A.2 Tackling the Vocabulary Mismatch" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.828, + 0.304 + ], + "angle": 0, + "content": "The vocabulary mismatch problem lies in the fact that the vocabulary sets are different between the ASR and the LLM, and TASTE is aligned with the text transcription tokens from ASR. Consider that given a text transcription \\( \\mathbf{v} \\) and the vocabulary sets of ASR and LLM denoted as \\( \\mathbb{V}^{\\mathrm{asr}} \\) and \\( \\mathbb{V}^{\\mathrm{llm}} \\), the ASR tokenized sequence \\( \\mathbf{v}^{\\mathrm{asr}} = [v_1^{\\mathrm{asr}}, v_2^{\\mathrm{asr}}, \\dots, v_N^{\\mathrm{asr}}] \\), \\( v_i^{\\mathrm{asr}} \\in \\mathbb{V}^{\\mathrm{asr}} \\) and the LLM tokenized sequence \\( \\mathbf{v}^{\\mathrm{llm}} = [v_1^{\\mathrm{llm}}, v_2^{\\mathrm{llm}}, \\dots, v_M^{\\mathrm{llm}}] \\), \\( v_i^{\\mathrm{llm}} \\in \\mathbb{V}^{\\mathrm{llm}} \\) can be different in terms of token ids and sequence lengths. Since the TASTE token and embedding are aligned with \\( \\mathbf{v}^{\\mathrm{asr}} \\), we need to derive a method to align them with \\( \\mathbf{v}^{\\mathrm{llm}} \\) for text-aligned speech-text modeling. Notice that \\( \\mathbf{v}^{\\mathrm{asr}} \\) and \\( \\mathbf{v}^{\\mathrm{llm}} \\) both represent \\( \\mathbf{v} \\), we propose to mitigate the issue through word-level grouping, averaging, and aligning, detailed in Algorithm 1. 
By crafting TASTE speech tokenization into the word level, we are able to align it with the text tokens of the LLM, denoted as \\( \\tilde{\\mathbf{q}}, \\tilde{\\mathbf{z}} \\). In practice, we also adopt the word-level averaging technique during the TASTE tokenization training phase, ensuring that the word-level TASTE tokenization facilitates high-quality reconstruction." + }, + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.359, + 0.72, + 0.374 + ], + "angle": 0, + "content": "Algorithm 1 Aligning TASTE with LLM Tokenization via Word-Level Techniques" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.376, + 0.827, + 0.907 + ], + "angle": 0, + "content": "1: Initialization: Text transcription \\(\\pmb{v} = [\\mathrm{word}_1,\\mathrm{word}_2,\\dots ,\\mathrm{word}_W]\\) ASR tokens of the transcription \\(v^{\\mathrm{asr}} = [v_1^{\\mathrm{asr}},v_2^{\\mathrm{asr}},\\dots ,v_N^{\\mathrm{asr}}]\\) TASTE embedding \\(\\hat{z} = [\\hat{z}_1,\\hat{z}_2,\\dots ,\\hat{z}_N]\\) LLM tokens of the transcription \\(v^{\\mathrm{llm}} = [v_1^{\\mathrm{llm}},v_2^{\\mathrm{llm}},\\dots ,v_M^{\\mathrm{llm}}]\\) \n2: procedure WORDLEVELGROUPING(v, \\(v^{\\mathrm{asr}},\\hat{z},v^{\\mathrm{llm}})\\) \n3: Since \\(v^{\\mathrm{asr}}\\) is a token sequence represents \\(\\mathbf{v}\\) we can easily group it by words: \n4: \\(\\pmb{v}_{\\mathrm{grouped}}^{\\mathrm{asr}}\\leftarrow \\underbrace{\\left[(v_{1}^{\\mathrm{asr}},v_{2}^{\\mathrm{asr}},v_{3}^{\\mathrm{asr}})_{1},(v_{4}^{\\mathrm{asr}})_{2},\\ldots,(v_{N - 1}^{\\mathrm{asr}},v_{N}^{\\mathrm{asr}})_{W}\\right]}_{\\text{word}_1}\\) ▷Group \\(v^{\\mathrm{asr}}\\) by the words of \\(\\pmb{v}\\) \n5: With the word-level grouping from \\(v^{\\mathrm{asr}}\\) , we can group TASTE embedding \\(\\hat{z}\\) as well: \n6: \\(\\hat{z}_{\\mathrm{grouped}}\\gets \\left[(\\hat{z}_1,\\hat{z}_2,\\hat{z}_3)_1,(\\hat{z}_4)_2,\\dots,(\\hat{z}_{N - 1},\\hat{z}_N)W\\right]\\) \n7: Finally, we can group \\(v^{\\mathrm{llm}}\\) following 
the similar procedure of grouping \\(v^{\\mathrm{asr}}\\) . \n8: \\(\\pmb{v}_{\\mathrm{grouped}}^{\\mathrm{llm}}\\leftarrow \\underbrace{\\left[(v_{1}^{\\mathrm{llm}},v_{2}^{\\mathrm{llm}})_{1},(v_{3}^{\\mathrm{llm}},v_{4}^{\\mathrm{llm}})_{2},\\dots,(v_{M - 2}^{\\mathrm{llm}},v_{M - 1}^{\\mathrm{llm}},v_{M}^{\\mathrm{llm}})_{W}\\right]}_{\\text{word}_2}\\) \n9: Due to the vocabulary mismatch, the grouping of \\(v^{\\mathrm{llm}}\\) is different from \\(v^{\\mathrm{asr}}\\) , \\(\\hat{z}_{\\mathrm{grouped}}\\) \n10: end procedure \n11: procedure WORDLEVELAVERAGING(\\(\\hat{z}_{\\mathrm{grouped}}\\)) \n12: \\(\\bar{z}\\gets []\\) ▷ Initialize a new sequence \n13: for word group index \\(i\\gets 1\\) to \\(W\\) do \n14: word group \\((\\hat{z}_j,\\dots,\\hat{z}_k)\\gets \\hat{z}_{\\mathrm{grouped}}[i]\\) \n15: \\(\\bar{z}_{[j:k]}\\gets \\text{Average} ((\\hat{z}_j,\\dots,\\hat{z}_k))\\) ▷Average the word group \n16: append \\(\\bar{z}_{[j:k]}\\) to \\(\\bar{z}\\) \n17: end for \n18: Resulting in word-level TASTE embedding \\(\\bar{z}\\in \\mathbb{R}^{W\\times d_z}\\) , where \\(W\\) is the word length of \\(V\\) \n19: end procedure \n20: procedure ALIGNWORDLEVELEMBEDGINGWITHLLM( \\(\\bar{z},v_{\\mathrm{grouped}}^{\\mathrm{llm}}\\) ) \n21: \\(\\tilde{z}\\gets []\\) ▷ Initialize a new sequence \n22: for word group index \\(i\\gets 1\\) to \\(W\\) do \n23: word group \\((v_j^{\\mathrm{llm}},\\dots,v_k^{\\mathrm{llm}})\\gets v_{\\mathrm{grouped}}[i]\\) \n24: \\(M\\gets \\text{Length}((v_j^{\\mathrm{llm}},\\dots,v_k^{\\mathrm{llm}}))\\) ▷Get the length of the word group. 
\n25: for \\(m\\gets 1\\) to \\(M\\) do ▷add \\(M\\times \\bar{z}[i]\\) into the aligned sequence \\(\\tilde{z}\\) \n26: append \\(\\tilde{z}[i]\\) to \\(\\tilde{z}\\) \n27: end for \n28: end for \n29: end procedure \n30: return The LLM-aligned word-level TASTE embedding \\(\\tilde{z}\\) and its codes form \\(\\tilde{\\boldsymbol{q}}\\)" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.331, + 0.106 + ], + "angle": 0, + "content": "A.3 Training Details" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.827, + 0.219 + ], + "angle": 0, + "content": "We separate the training process into the two phases: deriving TASTE tokenization and conducting spoken language modeling with TASTE. In the tokenization phase, only the Aggregator, Quantizer, and the UnitDecoder is trainable. We use the Adam optimizer and the learning rate is set to 0.0016. The batch size is set to 160 seconds on each of the 8 NVIDIA A6000 GPUs we used. Note that in the first 2 epochs the quantization is not applied. From the beginning of the third epoch, quantization is applied and the Quantizer starts to be updated. We train the TASTE tokenizer for 5 epochs, which takes about 2 days for learning, with the learning rate gradually decayed." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.309 + ], + "angle": 0, + "content": "As for the spoken language modeling training phase, we use the AdamW optimizer, the Consine scheduler with the learning rate set to 1e-5. We use 8 Nvidia A6000 GPUs for training. The total batch size summation over the GPUs is set to 768 samples with the gradient accumulation steps set to 2. To reduce the memory overhead and the computational cost, we employ bfloat16 mixed precision during training. Tools such as DeepSpeed [36] and Liger Kernel [13] are also applied to speed up the fine-tuning process." 
+ }, + { + "type": "title", + "bbox": [ + 0.173, + 0.333, + 0.346, + 0.346 + ], + "angle": 0, + "content": "A.4 Evaluation Details" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.361, + 0.362, + 0.375 + ], + "angle": 0, + "content": "A.4.1 Human Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.389, + 0.825, + 0.43 + ], + "angle": 0, + "content": "We conduct human listening tests through Amazon Mechanical Turk. In each experiment, we randomly select the same 20 samples from each method; and for each sample we collect more than 10 evaluation scores across different human evaluators." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.454, + 0.827, + 0.511 + ], + "angle": 0, + "content": "MUSHRA In Table 1, we have shown our result of the MUSRHA human listening test [40]. Following [51], we conduct the evaluation with a hidden reference but without a lowerpass-filtered anchor. We instruct evaluators to rate the perceptual quality of the given samples with respect to the ground truth on a scale of 1 to 100." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.534, + 0.825, + 0.577 + ], + "angle": 0, + "content": "Speech Continuation MOS In Table 2, we mention that we have conducted the human listening test to evaluate the overall performance of the speech continuations. Here, we present the instruction for human speech continuation MOS evaluation as follows:" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.594, + 0.603, + 0.609 + ], + "angle": 0, + "content": "Instruction for Human Speech Continuation MOS Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.621, + 0.799, + 0.647 + ], + "angle": 0, + "content": "In this test, each sample will contain a short audio clip called \"prompt\" (3 seconds) and a longer audio clip called \"prompt+continuation\" (\\(\\sim\\)15 seconds)." 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.647, + 0.799, + 0.673 + ], + "angle": 0, + "content": "You will be asked to rate the speech quality of the \"prompt+continuation\" audio clip, specifically focus on the \"continuation\" part." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.673, + 0.799, + 0.698 + ], + "angle": 0, + "content": "The rating should be based on how likely you think that the long audio is a proper continuation of the \"prompt\" audio." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.698, + 0.568, + 0.711 + ], + "angle": 0, + "content": "Specifically, the rating should be based on the following scale:" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.735, + 0.58, + 0.748 + ], + "angle": 0, + "content": "1: Bad - The \"continuation\" is not distinguishable or not natural." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.748, + 0.51, + 0.761 + ], + "angle": 0, + "content": "2: Poor - The \"continuation\" is \\(25\\%\\) distinguishable." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.761, + 0.575, + 0.773 + ], + "angle": 0, + "content": "3: Fair - The \"continuation\" is \\(50\\%\\) distinguishable and natural." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.773, + 0.584, + 0.786 + ], + "angle": 0, + "content": "4: Good - The \"continuation\" is \\(75\\%\\) distinguishable and natural." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.786, + 0.654, + 0.799 + ], + "angle": 0, + "content": "5: Excellent - The \"continuation\" is distinguishable, meaningful, and natural." + }, + { + "type": "list", + "bbox": [ + 0.197, + 0.735, + 0.654, + 0.799 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.823, + 0.662, + 0.836 + ], + "angle": 0, + "content": "Distinguishable means that the words in the \"continuation\" is distinguishable." 
+ }, + { + "type": "text", + "bbox": [ + 0.197, + 0.836, + 0.799, + 0.862 + ], + "angle": 0, + "content": "Natural means that the \"continuation\" sounds like a real human voice and a natural continuation of the prompt without considering the content of the speech." + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.862, + 0.799, + 0.887 + ], + "angle": 0, + "content": "Meaningful means that you can not only distinguish the words but also understand the meaning of the whole \"prompt+continuation\"." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.092, + 0.43, + 0.106 + ], + "angle": 0, + "content": "A.4.2 GPT-4o for MOS Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.115, + 0.825, + 0.185 + ], + "angle": 0, + "content": "As introduced in Section 4.2.1, we use GPT-4o to assign MOS scores to the speech continuation results. Here, we describe the detailed procedure. First, whisper-large-v3 is applied to transcribe the generated speech. Then, given the transcription, the text content from the prompt audio, and the instruction template, GPT-4o can produce a score between 1 and 5. The instruction template is provided below:" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.196, + 0.517, + 0.212 + ], + "angle": 0, + "content": "Instruction Prompt for GPT-4o MOS Evaluation" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.223, + 0.77, + 0.262 + ], + "angle": 0, + "content": "The task is evaluating the relevance and likelihood of the predicted text continuation, given the text prompt. You should also consider whether the meaning of the text continuation is making sense. 
The text prompt is:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.262, + 0.278, + 0.274 + ], + "angle": 0, + "content": "{\"prompt\"}" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.275, + 0.447, + 0.285 + ], + "angle": 0, + "content": ", and the text continuation is :" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.286, + 0.286, + 0.299 + ], + "angle": 0, + "content": "{\"content\"}" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.311, + 0.755, + 0.336 + ], + "angle": 0, + "content": "You must give an overall rating from 1 to 5. The rating guideline is as below:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.337, + 0.79, + 0.35 + ], + "angle": 0, + "content": "1: The text continuation is very unlikely and irrelevant to the text prompt." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.35, + 0.763, + 0.375 + ], + "angle": 0, + "content": "2: The text continuation is unlikely and marginally relevant to the text prompt." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.376, + 0.747, + 0.4 + ], + "angle": 0, + "content": "3: The text continuation is moderately likely and relevant to the text prompt." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.401, + 0.722, + 0.413 + ], + "angle": 0, + "content": "4: The text continuation is likely and relevant to the text prompt." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.413, + 0.668, + 0.425 + ], + "angle": 0, + "content": "5: The text continuation is very likely and highly relevant." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.426, + 0.637, + 0.438 + ], + "angle": 0, + "content": "You should take the following steps to provide the score:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.439, + 0.668, + 0.45 + ], + "angle": 0, + "content": "First: briefly analyze the sample with the above definition." 
+ }, + { + "type": "text", + "bbox": [ + 0.199, + 0.451, + 0.748, + 0.463 + ], + "angle": 0, + "content": "Second: MUST follow the output format as: I would rate the score as _" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.501, + 0.347, + 0.515 + ], + "angle": 0, + "content": "A.5 Additional Results" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.526, + 0.486, + 0.542 + ], + "angle": 0, + "content": "A.5.1 Details on SALMON and StoryCloze" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.551, + 0.825, + 0.58 + ], + "angle": 0, + "content": "Our detailed results on SALMON and StoryCloze are reported in Table 4. The introductions of the two benchmarks—SALMON and StoryCloze—are described below." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.594, + 0.825, + 0.679 + ], + "angle": 0, + "content": "SALMON for Acoustic Evaluation SALMON offers a comprehensive set of metrics designed to evaluate SLMs in multiple dimensions. In summary, each test sample consists of a positive sample and a negative sample. The negative sample differs from the positive sample by having some segments altered. These alterations include changes in speaker, gender, environment (e.g., room acoustics), or sentiment in the middle of the utterance. The SLM serves as an anomaly detector that aims to distinguish between the pairs of positive and negative samples. The distinction is based on" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.71, + 0.825, + 0.766 + ], + "angle": 0, + "content": "Table 4: The evaluation results on SALMON and StoryCloze of different SLMs, and BG means background. We report likelihood-based accuracy on SALMON (acoustic aspect) and StoryCloze (semantic aspect). The baseline (S3 token) is conducted by joint speech-text modeling with the S3 token as speech tokenization." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.766, + 0.825, + 0.909 + ], + "angle": 0, + "content": "
METHODLoRASALMON (ACOUSTIC CONSISTENCY)STORYCLOZE
SentimentSpeakerGenderRoomBG (domain)BG (rand.)sSC / tSC
Previous Work
TWIST 1.3B ([11])X61.5±3.469.0±3.369.5±3.359.0±3.555.5±3.560.5±3.552.4±0.8 / 70.6±0.7
TWIST 7B ([11])X61.5±3.471.0±3.270.0±3.262.0±3.455.5±3.560.5±3.555.3±0.8 / 74.1±0.7
Spirit LM ([33])X54.5±3.569.5±3.367.0±3.354.5±3.553.5±3.555.5±3.561.0±0.8 / 82.9±0.6
Spirit LM Expr. ([33])X73.5±3.181.0±2.885.0±2.554.5±3.556.0±3.564.0±3.456.9±0.8 / 75.4±0.7
Ours
Baseline (S3 token)49.5±3.548.8±3.548.8±3.549.5±3.555.3±3.549.5±3.554.4±0.8 / 63.0±0.8
TASLM 1B (token)59.0±3.568.0±3.370.5±3.261.0±3.452.0±3.554.0±3.564.2±0.8 / 88.9±0.5
TASLM 1B (embedding)57.5±3.567.0±3.375.5±3.050.0±3.547.0±3.549.0±3.564.0±0.8 / 89.5±0.5
" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "the likelihood score given by each SLM, which is then evaluated with the overall precision between the ground truth and the prediction." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.135, + 0.827, + 0.233 + ], + "angle": 0, + "content": "StoryCloze for Semantic Evaluation To evaluate the SLMs' ability to comprehend semantic coherence and logical reasoning, we employ the spoken version of StoryCloze test (sSC) and the Topic StoryCloze test (tSC) assembled by ([11]). Assessment of narrative understanding involves presenting a four-sentence story setup, followed by two possible endings. These tasks require the model to select the most appropriate conclusion, thereby testing its grasp of causal and temporal relationships within a narrative. Similarly to SALMON, we measure the accuracy of the distinctions based on the likelihood scores." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.247, + 0.445, + 0.263 + ], + "angle": 0, + "content": "A.5.2 Report of Standard Deviations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.782, + 0.287 + ], + "angle": 0, + "content": "We report the standard deviations of our tables in the main text to allow further investigation." + }, + { + "type": "table_caption", + "bbox": [ + 0.318, + 0.307, + 0.679, + 0.32 + ], + "angle": 0, + "content": "Table 5: Results with standard deviations of Table 1" + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.32, + 0.825, + 0.486 + ], + "angle": 0, + "content": "
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%±0.074.09±0.323.84±0.26---76.6±15.9
Encodec [5]15005.1%±0.111.58±0.343.26±0.243.46±0.280.94±0.0030.63±0.10-
30002.6%±0.082.35±0.533.48±0.253.81±0.270.96±0.0020.78±0.0725.6±18.6
SpeechTokenizer [51]5005.2%±0.111.27±0.052.99±0.172.80±0.240.94±0.0030.35±0.09-
20003.0%±0.083.56±0.433.60±0.283.65±0.220.97±0.0020.80±0.0653.9±22.9
40002.5%±0.083.90±0.363.76±0.284.03±0.170.98±0.0020.92±0.04-
Mimi [6]10003.1%±0.093.60±0.373.60±0.303.62±0.260.96±0.0020.82±0.0667.6±19.8
S3 token (topline) [7]6003.0%±0.094.18±0.273.90±0.243.30±0.260.96±0.0020.82±0.0970.2±17.0
Text-only (baseline)~505.9%±0.114.31±0.164.11±0.222.44±0.230.57±0.0060.78±0.0942.6±27.1
TASTE (ours)~1504.4%±0.114.29±0.184.10±0.223.05±0.260.91±0.0030.80±0.0868.3±17.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.317, + 0.516, + 0.68, + 0.528 + ], + "angle": 0, + "content": "Table 6: Results with standard deviations of Table 2." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.529, + 0.825, + 0.693 + ], + "angle": 0, + "content": "
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.15±1.274.25±0.224.00±1.28---
Cascade (LLaMA2-7Bβ)-3.43±1.274.25±0.253.98±1.29---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.48±0.703.25±0.481.95±1.0162.5±1.461.5±0.562.0±0.7
TWIST 7B [11]7B / 7Bγ1.44±0.703.27±0.522.04±0.9163.4±1.464.7±0.564.1±0.7
Spirit LM [33]7B / 7Bβ2.79±1.063.41±0.192.38±0.8159.1±1.472.0±0.565.6±0.7
Spirit LM Expr. [33]7B / 7Bβ1.90±1.033.40±0.302.41±0.9669.0±1.366.2±0.567.6±0.7
Baseline (S3 token)45M / 1.3Bα1.37±0.874.04±0.272.84±1.1150.2±1.458.7±0.654.5±0.8
TASLM 1B (token)45M / 1.3Bα3.08±1.374.07±0.283.93±1.3060.8±1.476.5±0.568.7±0.7
TASLM 1B (embed.)45M / 1.3Bα3.16±1.334.22±0.214.16±1.2057.7±1.476.7±0.567.2±0.7
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.179, + 0.694, + 0.482, + 0.704 + ], + "angle": 0, + "content": "Base models: \\(\\alpha\\) LLaMA3.2-1B, \\(\\beta\\) LLaMA2-7B, \\(\\gamma\\) LLaMA-7B, \\(\\theta\\) OPT-1.3B" + }, + { + "type": "table_caption", + "bbox": [ + 0.317, + 0.736, + 0.68, + 0.749 + ], + "angle": 0, + "content": "Table 7: Results with standard deviations of Table 3." + }, + { + "type": "table", + "bbox": [ + 0.322, + 0.75, + 0.678, + 0.88 + ], + "angle": 0, + "content": "
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.3±0.939.0±2.8
Mini-Omni 0.5B [46]T+A4.5±0.511.6±1.8
Helium 7B (text)T32.3±1.075.0±2.5
Moshi 7B [6]T+A26.6±1.062.3±2.8
LLaMA3.1-8B-InstructT60.4±1.171.7±2.6
Llama-Omni-8B [9]T+A35.5±1.167.3±2.7
LLaMA3.2-1B†T24.0±0.951.0±2.9
TASLM 1B (embed.)†T+A27.1±1.057.6±2.9
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.328, + 0.881, + 0.612, + 0.894 + ], + "angle": 0, + "content": "We apply few-shot learning to facilitate question answering." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_origin.pdf b/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1f4531832e73fb1dff2e2657d992896192f80568 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/5069c415-7c54-431e-b348-92054587ecc3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e6d9726ee65bdb16f53063c7732a996dde48b511f281104166d7663edcc6e53 +size 3302524 diff --git a/data/2025/2504_07xxx/2504.07053/full.md b/data/2025/2504_07xxx/2504.07053/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8dfa6fda22ba31e569f8c70b06181ba601c3e2e7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/full.md @@ -0,0 +1,441 @@ +# TASTE: Text-Aligned Speech Tokenization and Embedding for Spoken Language Modeling + +Liang-Hsuan Tseng\*23 Yi-Chang Chen\*1 Kuan-Yi Lee\*23 Da-Shan Shiu\*1 Hung-yi Lee\*3 + +*Equal contribution 1MediaTek Research + +$^{2}$ Internship at MediaTek Research $^{3}$ National Taiwan University + +{yi-chang.chen, ds.shiu}@mtkresearch.com + +{f11921067,b10901091,hungyilee}@ntu.edu.tw + +# Abstract + +Recent efforts target spoken language models (SLMs) that not only listen but also speak for more natural human-LLM interaction. Joint speech-text modeling is a promising direction to achieve this. However, the effectiveness of recent speech tokens for joint modeling remains underexplored. 
To address this, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a method that directly addresses the modality gap by aligning speech token with the corresponding text transcription during the tokenization stage. We propose a method that can achieve this through a attention-based aggregation mechanism and with speech reconstruction as the training objective. We conduct extensive experiments and show that TASTE can preserve essential paralinguistic information while dramatically reducing the token sequence length. With TASTE, we perform straightforward joint spoken language modeling by using Low-Rank Adaptation on the pre-trained text LLM. Experimental results show that TASTE-based SLMs perform comparable to previous work on SALMON and StoryCloze; while significantly outperform other pre-trained SLMs on speech continuation across subjective and objective evaluations. To our knowledge, TASTE is the first end-to-end approach that utilizes a reconstruction objective to automatically learn a text-aligned speech tokenization and embedding suitable for spoken language modeling. Our demo, code, and model are available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io. + +# 1 Introduction + +Learning a speech representation suitable for universal speech-processing tasks has long been a significant challenge [31, 42, 48]. Unlike text—which can be encoded discretely [18, 19, 39]—speech is a continuous waveform carrying layered information (acoustic, semantic, prosodic, etc.). Recent neural self-supervised learning (SSL) methods move beyond filter-banks and MFCCs to encode raw audio into compact, high-dimensional embeddings that excel on discriminative tasks such as automatic speech recognition (ASR), emotion recognition, and speaker verification. Despite these gains, learning representations for generative speech tasks remains an open and more complex problem that has begun to attract focused attention [17, 28, 44, 45]. 
+ +Among the generative speech tasks, spoken language modeling (SLM) is an intriguing direction, aiming to create models that can not only listen but also speak. Typically, building an SLM requires two stages: first, deriving speech tokenizations; second, training a language model on the speech tokens. For the speech tokens, previous approaches either apply SSL-based representations following by discretization techniques [11, 14, 21, 32] or reuse units from neural codec models like EnCodec + +![](images/46c84b75462a0de2c6f9d19b33a64cae57a50c650d7f2f43f7c1867ad55d4557.jpg) +Figure 1: The concept overview. Conventional methods extract speech tokens solely from speech, inducing length-mismatch problem when conducting joint speech-text modeling. By taking dual modalities as input, we generate speech tokenization that is aligned with text, facilitating straightforward and effective joint modeling. + +![](images/c6d5132251cd60884addea1cc932e94c730e578aeb4eca458f8ca4823797f9a7.jpg) + +and SoundStream [5, 20, 41, 49]. Although autoregressive modeling with these speech tokens shows great potential in text-to-speech (TTS) [45, 47], previous SLMs that model only speech tokens [21, 32] have been shown to lack semantic fidelity [22]. + +To bridge this gap, one promising direction is to utilize text—which is rich in semantic—during spoken language modeling. TWIST [11] shows that SLMs can benefit from initializing with text LLMs. More recent work often conducts joint speech-text modeling on tokens of both modalities to facilitate the semantic coherence on the generated speech [6, 9, 33, 46]. Yet integrating text and speech tokens introduces a length-mismatch challenge, as speech token sequences are usually longer than their text counterparts. Common remedies may include interleaving speech and text tokens [33] or inserting padding to synchronize sequence lengths between modalities [6, 9, 46]. 
However, these methods require either additional speech-text alignment or heuristic rules to enable joint modeling. + +In this work, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a special type of speech tokenization tailored for speech-text joint spoken language modeling. By acknowledging that the length mismatch introduces additional complexity in joint modeling, we develop our speech token to be aligned with its corresponding text transcription tokens. To achieve this, we first obtain the textual transcription of a speech with the ASR model; then we derive the speech token based on the transcription through a specialized cross-attention mechanism for speech reconstruction. Note that the full process can be accomplished in an end-to-end manner, with no explicit speech-text alignment required. Unlike previous speech tokens that are developed under a fixed stride with fixed down-sampling rate, our speech token has dynamic frequency as it is text-aligned. Figure 1 shows an overall concept of TASTE, illustrating how our tokenization allows straightforward joint modeling. + +To evaluate the effectiveness of TASTE, we first conduct extensive experiments on speech reconstruction. Our results on LibriSpeech [34] show that TASTE not only resynthesizes speech in high quality, but also retains similarity to the original speech. TASTE achieves high-end reconstruction at an extremely low bit rate ( $\sim 150$ bps); while the other comparable methods are often more than thousands of bps. More intriguingly, we demonstrate that TASTE allows simple text-aligned speech editing. By exchanging the partial text-aligned speech tokens from two different utterances with the same content, we demonstrate that the paralinguistic information such as duration and tone can be exchanged precisely following the words being exchanged, resulting in natural edited speech. + +On the other hand, we demonstrate that TASTE successfully allows effective spoken language modeling. 
We perform straightforward joint modeling with TASTE under Low-Rank Adaptation [15]. We first perform speech continuation experiments with 3-second speech prompts given. The evaluation is three-fold. We use GPT-4o for evaluating the semantic aspect; UTMOS [38] for the acoustic aspect; and the human listening test for the general evaluation. Results show that our SLMs not only generate natural, meaningful speech continuations, but also outperform the other 7B pre-trained SLMs across all the continuation evaluation aspects with 1.3B parameters. We also evaluate our SLMs on two benchmarks, SALMON [25] and StoryCloze [11], and our results show that our SLMs achieve comparable performance compared to the other speech-text joint modeling methods. Moreover, we show that our pretrained SLM can perform spoken question answering under a few-shot scenario. + +In summary, we derive TASTE, a text-aligned speech tokenization that allows effective joint speech-text spoken language modeling. By aligning the speech tokenization with its text counterpart during + +![](images/1ec9832577223e0e450fbaf505a535d7ac71357bf1e0ccf1924ef8ca6ebe0e86.jpg) +Figure 2: The overall framework of our text-aligned speech tokenization and embedding. The left side illustrates the process of obtaining the TASTE tokenization $\hat{z}$ , detailed in Section 2.1.1; while the right side demonstrates how we reconstruct the speech with TASTE (Section 2.1.2). The training objective for our speech reconstruction is discussed in Section 2.1.3. + +the tokenization stage, TASTE enables straightforward modeling. To the best of our knowledge, we are the first to utilize the reconstruction objective to automatically derive a text-aligned speech tokenization and embedding that is suitable for joint speech-text spoken language modeling. Our demo is available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io. 
+ +# 2 Method + +We propose text-aligned speech tokenization and embedding (TASTE) to facilitate effective joint speech-text spoken language modeling. Here, we first introduce how we derive our tokenization—TASTE—in Section 2.1, and then discuss how we use TASTE for spoken language modeling (§ 2.2). + +# 2.1 Building TASTE + +As depicted in Figure 2, TASTE is comprised of two main components: the text-aligned speech tokenizer ( $\S$ 2.1.1) that produces the text-aligned speech tokenization; and the speech decoder ( $\S$ 2.1.2) to reconstruct speech based on the text token and the TASTE speech token aligned with it. The training objective of speech reconstruction is described in Section 2.1.3. + +# 2.1.1 TASTE Speech Tokenizer + +In TASTE, the speech tokenizer, denoted as $\mathrm{Tokenizer}(\cdot)$ , is designed to generate the text-aligned speech tokenization and embedding with the speech-text pair $X = (u, v)$ taken as input, where $v$ represents the textual transcription of the speech utterance $u$ , which can be easily obtained through an automatic speech recognition (ASR) system. Recent developments in robust and efficient ASR ([10, 35]) allow us to focus on discussing how to derive the text-aligned speech token effectively by assuming that $v$ is of sufficient quality. The TASTE speech tokenizer is composed of three major components: an encoder, an aggregator, and a quantizer. + +The encoder Encoder $(\cdot)$ contains $L$ layers of Transformer ([43]) encoder blocks and is used to extract high-dimensional speech representation. We employ the pre-trained Whisper ASR encoder [35] as our speech encoder, and it is frozen during training. For an input speech utterance $\pmb{u}$ , the encoder produces a sequence of hidden states from each layer $[h^{(1)}, h^{(2)}, \dots, h^{(L)}]$ . 
In our experiments, we retain the last hidden layer representation $h^{(L)}$ and the shallow representation $h^{(l)}$ from the first half of the hidden representations of the encoder for later usage, denoted as: + +$$ +\boldsymbol{h}^{(L)}, \boldsymbol{h}^{(l)} = \operatorname{Encoder}(\boldsymbol{u}), \quad \text{where } 1 \leq l \leq \left\lfloor \frac{L}{2} \right\rfloor . +$$ + +Note that both of the hidden representations $\pmb{h}^{(L)}, \pmb{h}^{(l)} \in \mathbb{R}^{T \times d_h}$ have their length denoted as $T$ and the hidden dimension indicated by $d_h$ . + +The hidden representations extracted from the encoder are then passed to the aggregator. The aggregator is designed to obtain a more compressed speech representation $\mathbf{z}$ that is aligned in length with the text transcription $\mathbf{v}$ . Considering that $\mathbf{v} = [v_{1}, v_{2}, \dots, v_{N}]$ , $v_{i} \in \mathbb{V}$ is a text token sequence with length $N$ , the input and output of the aggregator can be denoted as: + +$$ +\boldsymbol{z} = \operatorname{Aggregator}(\boldsymbol{v}, \boldsymbol{h}^{(L)}, \boldsymbol{h}^{(l)}), \quad \text{where } \boldsymbol{z} \in \mathbb{R}^{N \times d_{z}}, \ \boldsymbol{v} \in \mathbb{V}^{N}, \ \text{and } \boldsymbol{h}^{(L)}, \boldsymbol{h}^{(l)} \in \mathbb{R}^{T \times d_{h}}. +$$ + +To make the speech representation $z$ text-aligned, we conduct a simple yet effective attention mechanism based on the three inputs. Consider that the original multi-head attention in [43] is denoted as MultiHead( $Q, K, V$ ), our first layer attention in the aggregator takes: + +$$ +Q = \text{text } v, \quad K = \text{encoder } h^{(L)}, \quad V = \text{encoder } h^{(l)}. +$$ + +By doing so, the length of our first multi-head attention output should follow the text transcription $v$ . Note that the query of the following layers becomes the output from the previous layer. 
In addition, intuitions of using the encoder's last hidden representation as keys, and the shallow hidden representation as values can be described as follows: 1) In Transformer-based ASR models, the last hidden states often encode rich speech-text alignment cues; sometimes the cross-attention weight matrices can even be exploited as soft word-alignment maps [10, 35]. 2) The shallow representation has been shown to support high-quality speech reconstruction even when the quantization is applied [7, 8]. Based on the above observations, we design our aggregator so that it can use the soft attention maps obtained from the last encoder representations and the text transcriptions, to aggregate the shallow encoder representations that are beneficial for high-end speech reconstruction. + +After getting the text-aligned representation, the quantizer $\mathrm{Quantizer}(\cdot)$ is adopted to discretize the text-aligned representation. We use the residual vector quantization (RVQ) to allow coarse-to-fine quantization. Given the text-aligned speech representation $z$ and the quantizer containing $R$ residual vector quantization layers, we generate: + +$$ +\boldsymbol{q}, \hat{\boldsymbol{z}} = \operatorname{Quantizer}(\boldsymbol{z}), \quad \boldsymbol{q} = \left[ \boldsymbol{q}^{(1)}, \boldsymbol{q}^{(2)}, \dots, \boldsymbol{q}^{(R)} \right], \quad \hat{\boldsymbol{z}} = \sum_{r=1}^{R} \hat{\boldsymbol{z}}^{(r)} \tag{1} +$$ + +where each $\pmb{q}^{(r)} \in \mathbb{C}^N$ denotes the $r$ -th layer code sequence with code set $\mathbb{C}$ ; and the quantized embedding $\hat{\pmb{z}}$ is the summation over each layer of the codebook vectors. Note that both the code sequences and the quantized speech embedding $\hat{\pmb{z}}$ are text-aligned, with length $N$ . 
As shown in Figure 2, the text and speech tokens are aligned in length and fed into the speech decoder after weighted sum in an autoregressive manner. The speech decoder is composed of two components: the unit decoder and the unit-to-speech vocoder. + +The unit decoder UnitDecoder $(\cdot)$ is a Transformer-based decoder that takes the text token sequence $\mathbf{v}$ and the aligned speech embedding $\hat{\mathbf{z}}$ as input and predicts the speech unit $\mathbf{y}$ for reconstruction: + +$$ +\boldsymbol{y} = \operatorname{UnitDecoder}(\hat{\boldsymbol{z}}, \boldsymbol{v}). \tag{2} +$$ + +Note that the additional speaker embedding is also taken as input to facilitate global speaker voice control in our spoken language models [16]. After generating the speech unit $\mathbf{y}$ , we use a unit-to-speech vocoder to further transform the unit into the reconstructed speech. + +# 2.1.3 Training Objective + +Similar to other reconstruction-based speech tokens [24, 51], we derive TASTE by training it for speech resynthesis. To achieve this, we extract the speech unit $\pmb{y}^{\mathrm{target}}$ with length $T'$ from the original speech $u$ as the target unit for our speech tokenizer and speech decoder. 
Given the text transcription $v$ , the TASTE speech embedding $\hat{z}$ , and the unit from the original speech $\pmb{y}^{\mathrm{target}}$ as the target, the speech reconstruction through the tokenizer and the unit decoder parametrized by $\theta$ under the next-token prediction scheme can be considered as minimizing the cross-entropy loss below: + +$$ +\mathcal{L}_{\mathrm{ce}}(\theta) = \frac{1}{T^{\prime}} \sum_{t=1}^{T^{\prime}} - \log p_{\theta} \left( y_{t}^{\mathrm{target}} \mid \hat{\boldsymbol{z}}, \boldsymbol{v}; \boldsymbol{y}_{<t}^{\mathrm{target}} \right) \tag{3} +$$ + +On the other hand, we employ the quantization loss as well to tokenize the continuous representation $z$ extracted from the encoder-aggregator. Following prior works [5, 49], given that $z^{(r)}$ is the $r$ -th residual and $\hat{z}^{(r)}$ indicates the $r$ -th quantized residual, the commitment loss is defined as: + +$$ +\mathcal{L}_{\mathrm{rvq}}(\theta) = \sum_{r=1}^{R} \| z^{(r)} - \hat{z}^{(r)} \|. \tag{4} +$$ + +By summation over both losses, we formulate the overall loss for training TASTE as: + +$$ +\mathcal{L}_{\mathrm{taste}} = \mathcal{L}_{\mathrm{ce}} + \mathcal{L}_{\mathrm{rvq}}. \tag{5} +$$ + +Note that to allow gradients to back-propagate from the unit decoder through the tokenizer, the straight-through estimation technique is applied to the quantization process during training. + +# 2.2 TASTE for Spoken Language Modeling + +Next, we describe how we conduct effective spoken language modeling with TASTE. Following previous work [11, 33], we perform pre-training on speech data. The text transcription of the speech data is also used for joint speech-text pre-training of our text-aligned spoken language model (TASLM). Since TASTE tokenization already aligns with the text token sequence, we can conduct a straightforward joint modeling, as illustrated in Figure 1. 
To demonstrate the robustness of TASTE, we perform two types of text-aligned spoken language modeling. First, we build $\mathrm{TASLM}_{\mathrm{token}}$ over our text-aligned speech token $q$ , discussed in Section 2.2.1. Then, we show how we build $\mathrm{TASLM}_{\mathrm{emb}}$ with our text-aligned speech embedding $\hat{z}$ , detailed in Section 2.2.2. + +# 2.2.1 Modeling TASTE Token + +As our speech tokens derived from the RVQ quantizer contain $R$ layers of codes, we employ $R$ linear heads for multi-head prediction in our $\mathrm{TASLM}_{\mathrm{token}}$ . Namely, the $\mathrm{TASLM}_{\mathrm{token}}$ simultaneously predicts the next text token and the corresponding $R$ layers of speech tokens in each step. The overall training objective follows the original next token prediction scheme, but with multiple predictions across modalities at each step. Specifically, given the text transcription $v$ and $R$ layers of quantized RVQ codes $q$ , the multi-head next-token prediction training objective can be formulated as: + +$$ +\mathcal{L}_{\mathrm{token}}(\phi) = \frac{1}{N} \sum_{i=1}^{N} \left( - \log p_{\phi}^{\mathrm{text}} \left( v_{i} \mid \boldsymbol{v}_{<i}, \boldsymbol{q}_{<i} \right) + \sum_{r=1}^{R} - \log p_{\phi}^{(r)} \left( q_{i}^{(r)} \mid \boldsymbol{v}_{<i}, \boldsymbol{q}_{<i} \right) \right), \tag{6} +$$ + +where $\phi$ represents the parameters of $\mathrm{TASLM}_{\mathrm{token}}$ , and $p^{(r)}$ is the $r$ -th probability prediction for the $r$ -th RVQ code. As for inference, we directly sample the codes and the text simultaneously, and transform the codes into the corresponding embedding for the speech decoder to generate speech. + +# 2.2.2 Modeling TASTE Embedding + +Besides the token code sets, recent progress on latent modeling [17, 28] motivates us to conduct experiments on modeling our text-aligned speech embedding. 
Referencing MELLE [28], we employ a linear layer that predicts the mean vector $\mu_{i}$ and a log-magnitude variance vector $\log \sigma_i^2$ , where $i$ indicates the $i$ -th frame of the sequence. The final predicted latent of frame $i$ is denoted as $e_i = \mu_i + \sigma_i\odot \epsilon$ , where $\epsilon \sim \mathcal{N}(0,I)$ . Following MELLE, the straight-through estimator is applied to allow gradients to back-propagate properly during training. + +To facilitate latent prediction, we apply the regularization loss and the Kullback-Leibler (KL) divergence loss to drive training, which is described as follows: + +$$ +\mathcal{L}_{\mathrm{reg}}(\psi) = \left\| \boldsymbol{e}_{\psi} - \hat{\boldsymbol{z}} \right\|_{2}^{2}, \quad \mathcal{L}_{\mathrm{KL}} = \frac{1}{2} \sum_{i=1}^{N} \sum_{j=1}^{d_{z}} \left( \sigma_{i}^{2}[j] + \left( \mu_{i}[j] - \hat{z}_{i}[j] \right)^{2} - 1 - \log \sigma_{i}^{2}[j] \right), \tag{7} +$$ + +where $\psi$ indicates the parameter of $\mathrm{TASLM}_{\mathrm{emb}}$ , and $d_{z}$ is the dimension of our text-aligned embedding $\hat{z}$ . The regularization loss $\mathcal{L}_{\mathrm{reg}}$ is adopted to predict close latent towards the target embedding $\hat{z}$ . The KL divergence loss calculates the KL divergence between the predicted latent distribution and the target distribution. Following MELLE, we select the target distribution to be $\mathcal{N}(\hat{z}_i,I)$ . + +This allows simplification of $\mathcal{L}_{\mathrm{KL}}$ , which can then be approximated with the predicted vectors $\mu_i, \sigma_i$ , and the target embedding $\hat{z}_i$ . 
Finally, the overall loss along with the text loss is described as: + +$$ +\mathcal{L}_{\mathrm{emb}}(\psi) = \lambda_{\mathrm{reg}} \cdot \mathcal{L}_{\mathrm{reg}} + \lambda_{\mathrm{KL}} \cdot \mathcal{L}_{\mathrm{KL}} + \frac{1}{N} \sum_{i=1}^{N} - \log p_{\psi}^{\mathrm{text}} \left( v_{i} \mid \boldsymbol{v}_{<i}, \hat{\boldsymbol{z}}_{<i} \right), \tag{8} +$$ + +where $\lambda_{\mathrm{reg}}$ , $\lambda_{\mathrm{KL}}$ are the weighting coefficients of the two losses, respectively. + +# 3 Experiment Setup + +# 3.1 Model Configuration + +For our TASTE speech tokenizer, we initialize our encoder from Whisper [35]. Specifically, we use whisper-large-v3 for our initialization. By doing so, we can reduce computational cost between obtaining the ASR transcription and extracting the TASTE tokenization with the TASTE encoder frozen during training. On the other hand, we use the S3 token from CosyVoice [7] as the target unit for speech reconstruction. Since their speech tokenization facilitates additional speaker embedding, we follow the same procedure to obtain one. Adding speaker embedding allows global speaker voice control, which is a reasonable and useful scenario for spoken language models. The unit-to-speech vocoder is comprised of a flow model [23, 27] and a HifiGAN. We use the published pre-trained ones from [7], and they are not involved in our training. For the quantizer, we set the RVQ layer $R = 4$ , the codebook size 512, and the codebook dimension to be 256. For the spoken language modeling, we follow previous work [6, 11, 22, 33] and initialize our spoken language model from a text LLM. However, this introduces the vocabulary mismatch problem between the ASR and LLM. We resolve this issue by using word-level TASTE tokenization and embedding, which is detailed in Appendix A.2. Moreover, we conduct Low-Rank Adaptation (LoRA) for parameter-efficient fine-tuning of our TASLMs. 
We set the corresponding hyperparameters rank $r = 64$ and $\alpha = 128$ . + +# 3.2 Dataset + +We use two datasets—Emilia and LibriTTS—as our training datasets. Emilia [12] is an in-the-wild dataset where the speech is web-scaled and the transcriptions are pseudo-labeled. We use only the English subset of this multi-lingual corpus, which is about 40,000 hours. LibriTTS [50] is a reading-style corpus based on LibriSpeech [34]. We use all the training splits in LibriTTS for training, which is approximately 600 hours of speech. In addition, the test-clean split in LibriSpeech is used for evaluation purposes for our TASTE tokenizer and TASLMs. + +# 4 Result + +We separate the evaluation into two phases: Section 4.1 shows the results regarding our TASTE tokenization; while Section 4.2 evaluates our TASLM across multiple aspects, including acoustic, semantic, and continuation. For clarity, the metrics are introduced within each section. + +# 4.1 Results of TASTE Tokenization + +# 4.1.1 Speech Reconstruction Evaluation + +We first represent the speech reconstruction evaluation results. For comprehensive evaluation, we use different metrics, including the reference-free metrics for quality assessment, and the reference-based metrics for evaluating the similarity between the reconstructed and the original speech. + +Quality Assessment We use ASR-WER, UTMOS [38], and DNS-MOS [37] as our metrics for evaluating the speech quality. For ASR-WER, we use HuBERT-Large [14] as the ASR model to transcribe the speech, and then calculate the word-error rate (WER) on the transcription. ${}^{1}$ UTMOS and DNS-MOS are both neural-based MOS predictors. While both evaluate the speech quality, the design purpose of DNS-MOS makes it more suitable for evaluation regarding the noise levels. + +Table 1: The speech tokenization evaluation results on the test-clean split of LibriTTS. The evaluation is separated into the QUALITY and the SIMILARITY assessments, as introduced in Section 4.1.1. 
We use gray text to indicate the worst-performing methods in each metric. + +
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%4.093.84---76.6
Encodec [5]15005.1%1.583.263.460.940.63-
30002.6%2.353.483.810.960.7825.6
SpeechTokenizer [51]5005.2%1.272.992.800.940.35-
20003.0%3.563.603.650.970.8053.9
40002.5%3.903.764.030.980.92-
Mimi [6]10003.1%3.603.603.620.960.8267.6
S3 token (topline) [7]6003.0%4.183.903.300.960.8270.2
Text-only (baseline)~505.9%4.314.112.440.570.7842.6
TASTE (ours)~1504.4%4.294.103.050.910.8068.3
+ +Similarity Assessment For similarity, we measure ViSQOL, duration consistency (Drtn. Con.), speaker similarity (Spkr. Sim.) and the MUSHRA test as human evaluation. We use ViSQOL [4], a production-ready tool that predicts speech quality via spectro-temporal image similarity comparisons. For the duration consistency, we first get the word-level alignment of the transcriptions of the original and the reconstructed speech using Montreal Forced Aligner [26]; then we calculate if the duration between each of the same words is matched under a preset tolerance window, which is set to 50 milliseconds. For the MUSHRA human listening test, we reference the original protocol [40] to instruct evaluators to rate the similarity and quality on a scale of 1 to 100 with reference given. + +Speech Reconstruction Results The evaluation results of our speech reconstruction on LibriSpeech are shown in Table 1. We highlight that our TASTE speech tokenization has the lowest bitrate among all the other speech tokenization methods. Note that since the speech tokenization is dynamic in frequency, we estimate our bitrate based on calculating the overall token count and the duration over the testing set. Despite the low bitrate, we generally attain much better performance compared with the worst-performing methods (gray text in the table) across each metric. Moreover, on the quality assessment, our MOS prediction scores are the second highest and even surpass the ground truth, showcasing that the reconstructed speech is of high quality. Next, we focus on the results of the similarity assessment. As for the duration consistency, we score the second-worst performance compared with the other methods. We attribute this to the fact that our tokenization compresses the sequence in a very dynamic way. Despite that, we still outperform the text-only method by a large margin, performing close to other speech tokenization methods, which all have a fixed down-sampling rate. 
Lastly, our method attains the second-highest MUSHRA score (excluding the ground-truth anchor). This highlights TASTE's effectiveness: even without reproducing every microscopic detail, it still yields perceptually high-quality speech in human listening tests. Overall, TASTE carries rich paralinguistic information, facilitating high-end speech reconstruction under an extremely low bitrate. + +# 4.1.2 TASTE for Text-Aligned Speech Editing + +After comprehensive speech-reconstruction experiments, we show that TASTE can also perform text-aligned speech editing. Suppose we have two utterances with the same transcript but different paralinguistic characteristics. By exchanging their TASTE token sequences word by word, we ask whether the associated paralinguistic traits are transferred as well. To make the effect easy to see, we choose utterances that differ mainly in speaking rate and focus on duration changes. The overall text-aligned editing procedure is described as follows: 1) Extract the TASTE tokens $\hat{z}^{\mathrm{orig}}$ for each source utterance. 2) Swap the tokens at the desired text positions, resulting in edited TASTE tokens $\hat{z}^{\mathrm{edit}}$ . 3) Decode the edited token sequence $\hat{z}^{\mathrm{edit}}$ back to speech. In Figure 3, we present the alignments obtained from MFA [26] of the original speech and the speech after editing, and compare them horizontally. As shown in the figure, words whose tokens were swapped exhibit clear duration shifts, while the untouched words keep their original timing—evidence that TASTE enables precise, text-aligned manipulation. Additional examples that target other paralinguistic dimensions are provided on our demo page. + +![](images/2f0bfffa21da24cd7b583028f31b507e7681918acc4a32cdc33e9ee74b4bd996.jpg) +Figure 3: An illustration of TASTE for text-aligned speech editing. The left side shows the process of our text-aligned speech editing. 
We first extract the TASTE tokens; swap the tokens partially; and then decode the edited TASTE tokens into edited speech. On the right shows an example visualization. Only the durations of the words with exchanged TASTE tokens show significant difference. + +![](images/81e12a75d5eaba0cef27031e19cf6686815234b8941b4228e622e8d418018a9b.jpg) + +# 4.2 Evaluating Text-Aligned Spoken Language Modeling + +To provide a comprehensive evaluation of our text-aligned spoken language modeling (TASLM), we first compare our pre-trained SLM with other methods through speech continuation and likelihood-based benchmarks in Section 4.2.1. Then, to further investigate the understanding capabilities, we conduct evaluation with spoken question answering in Section 4.2.2. + +# 4.2.1 Comparing TASLM with Pretrained SLMs + +Speech Continuation Evaluation A typical way to evaluate the pre-trained SLM is by performing conditional generation. Following previous work [11, 22], we use the 3-second prompt speech from the LibriSpeech test-clean. To evaluate the quality of the continuations generated by the SLMs, we employ GPT-4o to assign MOS scores regarding the transcribed speech continuation using ASR, focusing on the semantic coherence of the continuation. In addition, we compute UTMOS as for evaluating the speech quality and naturalness. Last but not least, we conduct human listening test, in which each evaluator is asked to give a MOS score regarding the overall performance of the generated speech continuation. The details of the instructions for GPT-4o and human are in Appendix. + +Likelihood-Based Evaluation Following previous work [11, 22, 33], we also evaluate our SLMs through likelihood-based benchmarks, where the accuracy score is based on whether the model chooses the correct continuation from the two given speech utterances based on its output likelihoods. + +Table 2: Pretrained SLM speech continuation and likelihood-based next-speech selection results. 
The superscripts at the bottom of the table indicate the base models used by each SLM. Cascade models refer to the pipeline with ASR (whisper-large-v3), text continuation by LMs, and TTS (CosyVoice). This comparison evaluates SLMs and cascade models in continuation evaluation. As shown in the table, TASLM tends to preserve the semantic capabilities of LMs. + +
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.154.254.00---
Cascade (LLaMA2-7Bβ)-3.434.253.98---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.483.251.9562.561.562.0
TWIST 7B [11]7B / 7Bγ1.443.272.0463.464.764.1
Spirit LM [33]7B / 7Bβ2.793.412.3859.172.065.6
Spirit LM Expr. [33]7B / 7Bβ1.903.402.4169.066.267.6
Baseline (S3 token)45M / 1.3Bα1.374.042.8450.258.754.5
TASLM 1B (token)45M / 1.3Bα3.084.073.9360.876.568.7
TASLM 1B (embed.)45M / 1.3Bα3.164.224.1657.776.767.2
+ +Base models: $\alpha$ LLaMA3.2-1B, $\beta$ LLaMA2-7B, $\gamma$ LLaMA-7B, $\theta$ OPT-1.3B + +We adopt two established benchmarks SALMON [25] and spoken StoryCloze [11, 29], which cover the acoustic aspect and the semantic aspect, respectively. Since both benchmarks contain multiple tasks, we report the average accuracy across these tasks within each benchmark for simplicity. The detailed results are in Appendix A.5.1 for the interested readers. We also report the mean of the SALMON and StoryCloze as an overall assessment for both aspects. + +Results The results of TASLM compared to other pre-trained SLMs are in Table 2. First, we highlight that our TASLMs have achieved significantly higher scores on speech continuation across human and machine evaluations; and good performance on the likelihood-based benchmarks. Note that our base language model contains only 1.3 billion parameters, showing the effectiveness of using TASTE for joint modeling. Compared to the cascade method that has the same base model (first row), our $\mathrm{TASLM}_{\mathrm{emb}}$ scores comparably on GPT-4o but better on human MOS. This indicates that our generated speech is more natural than the cascade one that utilizes TTS for synthesis. Next, our TASLM is the only SLM that not only maintains but even surpasses the performance of its corresponding text-base model. Moreover, we demonstrate that directly using the S3 token for joint modeling following [46] does not yield performance comparable in any aspect. This result further strengthens the intuition behind TASTE (mitigating the length-mismatch during the tokenization stage facilitates effective joint spoken language modeling). + +# 4.2.2 TASLM for Spoken Question Answering + +Following [6], we conduct evaluation on spoken question answering to investigate the understanding ability of our TASLM. For this experiment, we use the $\mathrm{TASLM}_{\mathrm{emb}}$ for simplicity. 
We compare our pre-trained only SLM with other instruction-finetuned joint SLMs such as Mini-Omni [46], Moshi [6], and Llama-Omni [9]. We use two spoken question answering benchmarks, Web Questions [2] and LLaMA-Questions [30], following [30]. We report the accuracy of answer containment. For fairness, we report not only the performance of the speech-text joint SLMs, but also the base text LLM they used if applicable. Our results indicate that our TASLM is the only method that does not + +degrade the corresponding text base LLM. We attribute the phenomenon to the effectiveness of our TASTE tokenization for joint speech-text modeling. + +Table 3: Evaluation of spoken question answering. Performance across modalities is compared row-wise, where T denotes text and A denotes audio. + +
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.339.0
Mini-Omni 0.5B [46]T+A4.511.6
Helium 7B (text)T32.375.0
Moshi 7B [6]T+A26.662.3
LLaMA3.1-8B-InstructT60.471.7
Llama-Omni-8B [9]T+A35.567.3
LLaMA3.2-1B†T24.051.0
TASLM 1B (embed.)†T+A27.157.6
+ +We apply few-shot learning to facilitate question answering. + +# 5 Conclusion + +In this work, we propose Text-Aligned Speech Tokenization and Embedding (TASTE), to facilitate joint speech-text spoken language modeling. By aggregating proper encoder representation through the specialized cross-attention mechanism and taking the ASR model as initialization, we make the speech tokenization text-aligned in an end-to-end manner with no explicit word alignment required. We conduct extensive evaluation on our TASTE tokenizer. Our results show that TASTE allows high quality speech reconstruction at an extremely low bitrate. With our text-aligned speech tokenization and embedding, joint speech-text modeling becomes straightforward and effective. Our experimental results indicate that TASTE enables turning a text LLM into a spoken one with the simple parameter-efficient finetuning technique applied. + +**Limitation** Several limitations of our current work point to promising avenues for future research. First, neither our TASTE tokenization nor the text-aligned SLM has been optimized for time efficiency; developing a low-latency, streaming variant remains future work. Second, we have evaluated TASTE only on English data—its portability to other languages deserves thorough investigation. Third, although our pretrained SLM generates high-quality continuations, it does not yet support robust turn-taking or instruction-following behavior, both of which are essential for truly interactive systems. + +# References + +[1] Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. wav2vec 2.0: A framework for self-supervised learning of speech representations. In Advances in Neural Information Processing Systems, 2020. +[2] Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. Semantic parsing on freebase from question-answer pairs. In Proceedings of the 2013 conference on empirical methods in natural language processing, 2013. 
+[3] Li-Wei Chen and Alexander Rudnicky. Fine-grained style control in transformer-based text-to-speech synthesis. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2022. +[4] Michael Chinen, Felicia SC Lim, Jan Skoglund, Nikita Gureev, Feargus O'Gorman, and Andrew Hines. Visqol v3: An open source production ready objective speech and audio metric. In 2020 twelfth international conference on quality of multimedia experience (QoMEX), 2020. +[5] Alexandre Défossez, Jade Copet, Gabriel Synnaeve, and Yossi Adi. High fidelity neural audio compression. Transactions on Machine Learning Research, 2023. +[6] Alexandre Défossez, Laurent Mazaré, Manu Orsini, Amélie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024. +[7] Zhihao Du, Qian Chen, Shiliang Zhang, Kai Hu, Heng Lu, Yexin Yang, Hangrui Hu, Siqi Zheng, Yue Gu, Ziyang Ma, et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024. +[8] Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. CoRR, 2024. +[9] Qingkai Fang, Shoutao Guo, Yan Zhou, Zhengrui Ma, Shaolei Zhang, and Yang Feng. LlamaOmni: Seamless speech interaction with large language models. CoRR, 2024. +[10] Sanchit Gandhi, Patrick von Platen, and Alexander M Rush. Distil-whisper: Robust knowledge distillation via large-scale pseudo labelling. arXiv preprint arXiv:2311.00430, 2023. +[11] Michael Hassid, Tal Remez, Tu Anh Nguyen, Itai Gat, Alexis Conneau, Felix Kreuk, Jade Copet, Alexandre Defossez, Gabriel Synnaeve, Emmanuel Dupoux, et al. Textually pretrained speech language models. Advances in Neural Information Processing Systems, 2023. 
+[12] Haorui He, Zengqiang Shang, Chaoren Wang, Xuyuan Li, Yicheng Gu, Hua Hua, Liwei Liu, Chen Yang, Jiaqi Li, Peiyang Shi, et al. Emilia: An extensive, multilingual, and diverse speech dataset for large-scale speech generation. In 2024 IEEE Spoken Language Technology Workshop (SLT), 2024. +[13] Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. Liger kernel: Efficient triton kernels for llm training. arXiv preprint arXiv:2410.10989, 2024. +[14] Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021. +[15] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021. +[16] Zeqian Ju, Yuancheng Wang, Kai Shen, Xu Tan, Detai Xin, Dongchao Yang, Yanqing Liu, Yichong Leng, Kaitao Song, Siliang Tang, et al. Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. International Conference on Machine Learning, 2024. + +[17] Jaehyeon Kim, Keon Lee, Seungjun Chung, and Jaewoong Cho. Clam-tts: Improving neural codec language model for zero-shot text-to-speech. *ICLR*, 2024. +[18] Taku Kudo. Subword regularization: Improving neural network translation models with multiple subword candidates. Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2018. +[19] Taku Kudo and John Richardson. Sentencepiece: A simple and language independent subword tokenizer and detokenizer for neural text processing. Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2018. 
+[20] Rithesh Kumar, Prem Seetharaman, Alejandro Luebs, Ishaan Kumar, and Kundan Kumar. High-fidelity audio compression with improved rvqgan. Advances in Neural Information Processing Systems, 2023. +[21] Kushal Lakhotia, Eugene Kharitonov, Wei-Ning Hsu, Yossi Adi, Adam Polyak, Benjamin Bolte, Tu-Anh Nguyen, Jade Copet, Alexei Baevski, Abdelrahman Mohamed, et al. On generative spoken language modeling from raw audio. Transactions of the Association for Computational Linguistics, 2021. +[22] Guan-Ting Lin, Prashanth Gurunath Shivakumar, Aditya Gourav, Yile Gu, Ankur Gandhe, Hung-yi Lee, and Ivan Bulyko. Align-slm: Textless spoken language models with reinforcement learning from ai feedback. arXiv preprint arXiv:2411.01834, 2024. +[23] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. The Eleventh International Conference on Learning Representations, 2022. +[24] Alexander H Liu, Sang-gil Lee, Chao-Han Huck Yang, Yuan Gong, Yu-Chiang Frank Wang, James R Glass, Rafael Valle, and Bryan Catanzaro. Uniwav: Towards unified pre-training for speech representation learning and generation. The Thirteenth International Conference on Learning Representations, 2025. +[25] Gallil Maimon, Amit Roth, and Yossi Adi. Salmon: A suite for acoustic language model evaluation. arXiv preprint arXiv:2409.07437, 2024. +[26] Michael McAuliffe, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. Montreal forced aligner: Trainable text-speech alignment using kaldi. In Interspeech 2017, 2017. +[27] Shivam Mehta, Ambika Kirkland, Harm Lameris, Jonas Beskow, Éva Székely, and Gustav Eje Henter. Overflow: Putting flows on top of neural transducers for better tts. Interspeech 2023, 2022. +[28] Lingwei Meng, Long Zhou, Shujie Liu, Sanyuan Chen, Bing Han, Shujie Hu, Yanqing Liu, Jinyu Li, Sheng Zhao, Xixin Wu, et al. Autoregressive speech synthesis without vector quantization. CoRR, 2024. 
+[29] Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and evaluation framework for deeper understanding of commonsense stories. Proceedings of NAACL-HLT, 2016. +[30] Eliya Nachmani, Alon Levkovitch, Roy Hirsch, Julian Salazar, Chulayuth Asawaroengchai, Soroosh Mariooryad, Ehud Rivlin, RJ Skerry-Ryan, and Michelle Tadmor Ramanovich. Spoken question answering and speech continuation using spectrogram-powered llm. In The Twelfth International Conference on Learning Representations, 2024. +[31] Tu Anh Nguyen, Maureen de Seyssel, Patricia Rozé, Morgane Rivière, Evgeny Kharitonov, Alexei Baevski, Ewan Dunbar, and Emmanuel Dupoux. The zero resource speech benchmark 2021: Metrics and baselines for unsupervised spoken language modeling. NeuRIPS Workshop on Self-Supervised Learning for Speech and Audio Processing, 2020. + +[32] Tu Anh Nguyen, Eugene Kharitonov, Jade Copet, Yossi Adi, Wei-Ning Hsu, Ali Elkahky, Paden Tomasello, Robin Algayres, Benoit Sagot, Abdelrahman Mohamed, et al. Generative spoken dialogue language modeling. Transactions of the Association for Computational Linguistics, 2023. +[33] Tu Anh Nguyen, Benjamin Muller, Bokai Yu, Marta R Costa-Jussa, Maha Elbayad, Sravya Popuri, Christophe Ropers, Paul-Ambroise Duquenne, Robin Algayres, Ruslan Mavlyutov, et al. Spirit-lm: Interleaved spoken and written language model. Transactions of the Association for Computational Linguistics, 2025. +[34] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: An asr corpus based on public domain audio books. In 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2015. +[35] Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International conference on machine learning, 2023. 
+[36] Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery and data mining, 2020. +[37] Chandan KA Reddy, Vishak Gopal, and Ross Cutler. Dnsmos: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2021. +[38] Takaaki Saeki, Detai Xin, Wataru Nakata, Tomoki Koriyama, Shinnosuke Takamichi, and Hiroshi Saruwatari. Utmos: Utokyo-sarulab system for voicemos challenge 2022. Interspeech 2022, 2022. +[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2016. +[40] B Series. Method for the subjective assessment of intermediate quality level of audio systems. International Telecommunication Union Radiocommunication Assembly, 2014. +[41] Hubert Siuzdak, Florian Grötschla, and Luca A Lanzendorfer. Snac: Multi-scale neural audio codec. Audio Imagination: NeurIPS 2024 Workshop AI-Driven Speech, Music, and Sound Generation, 2024. +[42] Hsiang-Sheng Tsai, Heng-Jui Chang, Wen-Chin Huang, Zili Huang, Kushal Lakhotia, Shu-wen Yang, Shuyan Dong, Andy T Liu, Cheng-I Jeff Lai, Jiatong Shi, et al. Superb-sg: Enhanced speech processing universal performance benchmark for semantic and generative capabilities. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics, 2022. +[43] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. Advances in neural information processing systems, 2017. 
+[44] Apoorv Vyas, Bowen Shi, Matthew Le, Andros Tjandra, Yi-Chiao Wu, Baishan Guo, Jiemin Zhang, Xinyue Zhang, Robert Adkins, William Ngan, et al. Audiobox: Unified audio generation with natural language prompts. arXiv preprint arXiv:2312.15821, 2023. +[45] Chengyi Wang, Sanyuan Chen, Yu Wu, Ziqiang Zhang, Long Zhou, Shujie Liu, Zhuo Chen, Yanqing Liu, Huaming Wang, Jinyu Li, et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023. +[46] Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024. + +[47] Detai Xin, Xu Tan, Kai Shen, Zeqian Ju, Dongchao Yang, Yuancheng Wang, Shinnosuke Takamichi, Hiroshi Saruwatari, Shujie Liu, Jinyu Li, et al. Rall-e: Robust codec language modeling with chain-of-thought prompting for text-to-speech synthesis. arXiv preprint arXiv:2404.03204, 2024. +[48] Shu-wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y Lin, Andy T Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, et al. Superb: Speech processing universal performance benchmark. Interspeech 2021, 2021. +[49] Neil Zeghidour, Alejandro Luebs, Ahmed Omran, Jan Skoglund, and Marco Tagliasacchi. Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021. +[50] Heiga Zen, Viet Dang, Rob Clark, Yu Zhang, Ron J Weiss, Ye Jia, Zhifeng Chen, and Yonghui Wu. Libritts: A corpus derived from librispeech for text-to-speech. Interspeech 2019, 2019. +[51] Xin Zhang, Dong Zhang, Shimin Li, Yaqian Zhou, and Xipeng Qiu. Speechtokenizer: Unified speech tokenizer for speech large language models. ICLR, 2024. + +# A Technical Appendices and Supplementary Material + +# A.1 Related Work + +Recent SLMs often require speech tokenization to conduct language modeling with the next prediction objective as the text LLMs. 
Unlike text, the speech signal is continuous and lengthy, making it difficult to derive proper speech tokenization for spoken language modeling. Common approaches may utilize self-supervised learned (SSL) speech models followed by quantization techniques to extract speech tokens [1, 11, 14, 21, 33]. In addition, audio or speech codec models have also been used for tokenization in recent SLMs [5, 6, 49, 51]. These models are designed for resynthesis, where the speech decoders are jointly learned with the encoders, making them easy to use for spoken language modeling. + +With speech tokenization, GSLM [21, 32] first demonstrates the possibility of building an SLM that can generate speech. TWIST [11] further shows that SLM can benefit from initialization with the text-pretrained LLM. With regard to the huge success of text-only LLMs, recent work shifts the focus towards joint speech-text modeling [6, 11, 46]. Challenged by the modality gap between speech and text tokens, different techniques are introduced to facilitate joint modeling. Spirit LM [33] adopts an interleaving strategy; moshi [6] trains its own tokenizer with a reduced token frequency. Moreover, different patterns and strategies such as delayed or sequential generation are introduced for joint modeling, aiming for more reasonable and coherent speech outputs [46]. + +Despite the increasing demand of joint speech-text modeling [6, 33, 46], we do not find any work discussing the effectiveness of current speech tokenization for it. Moreover, the speech token is often derived with speech or audio-only data2. Nonetheless, we observe that recent work is trying to mitigate the modality gap by reducing frequency speech token or conducting additional training stage for text-speech alignment. This motivates us to design a speech tokenization that is directly aligned with its text counterpart, tackling the mismatch issue during the tokenization stage. 
+ +In the main text, we have mentioned that we utilize a specialized mechanism based on attention to extract and aggregate the encoder representations. We clarify that the text-speech cross-attention mechanism has also been used for fine-grained control of text-to-speech synthesis (TTS). More specifically, Chen and Rudnicky [3] propose content-style cross-attention to indicate their text-speech cross-attention mechanism that enables style transfer in TTS. Although both utilize a specialized text-speech cross-attention mechanism, the design choices and problem formulations are completely different. We attribute our main novelty to inventing a text-aligned speech tokenization and embedding for joint spoken language modeling, and the text-speech cross attention mechanism is considered and shown to be a clean, effective, and straightforward way of achieving it. + +# A.2 Tackling the Vocabulary Mismatch + +The vocabulary mismatch problem lies in the fact that the vocabulary sets are different between the ASR and the LLM, and TASTE is aligned with the text transcription tokens from ASR. Consider that given a text transcription $\mathbf{v}$ and the vocabulary sets of ASR and LLM denoted as $\mathbb{V}^{\mathrm{asr}}$ and $\mathbb{V}^{\mathrm{llm}}$ , the ASR tokenized sequence $\mathbf{v}^{\mathrm{asr}} = [v_1^{\mathrm{asr}}, v_2^{\mathrm{asr}}, \dots, v_N^{\mathrm{asr}}]$ , $v_i^{\mathrm{asr}} \in \mathbb{V}^{\mathrm{asr}}$ and the LLM tokenized sequence $\mathbf{v}^{\mathrm{llm}} = [v_1^{\mathrm{llm}}, v_2^{\mathrm{llm}}, \dots, v_M^{\mathrm{llm}}]$ , $v_i^{\mathrm{llm}} \in \mathbb{V}^{\mathrm{llm}}$ can be different in terms of token ids and sequence lengths. Since the TASTE token and embedding are aligned with $\mathbf{v}^{\mathrm{asr}}$ , we need to derive a method to align them with $\mathbf{v}^{\mathrm{llm}}$ for text-aligned speech-text modeling. 
Notice that $\mathbf{v}^{\mathrm{asr}}$ and $\mathbf{v}^{\mathrm{llm}}$ both represent $\mathbf{v}$ , we propose to mitigate the issue through word-level grouping, averaging, and aligning, detailed in Algorithm 1. By crafting TASTE speech tokenization into the word level, we are able to align it with the text tokens of the LLM, denoted as $\tilde{\mathbf{q}}, \tilde{\mathbf{z}}$ . In practice, we also adopt the word-level averaging technique during the TASTE tokenization training phase, ensuring that the word-level TASTE tokenization facilitates high-quality reconstruction. + +Algorithm 1 Aligning TASTE with LLM Tokenization via Word-Level Techniques +1: Initialization: Text transcription $\pmb{v} = [\mathrm{word}_1,\mathrm{word}_2,\dots ,\mathrm{word}_W]$ ASR tokens of the transcription $v^{\mathrm{asr}} = [v_1^{\mathrm{asr}},v_2^{\mathrm{asr}},\dots ,v_N^{\mathrm{asr}}]$ TASTE embedding $\hat{z} = [\hat{z}_1,\hat{z}_2,\dots ,\hat{z}_N]$ LLM tokens of the transcription $v^{\mathrm{llm}} = [v_1^{\mathrm{llm}},v_2^{\mathrm{llm}},\dots ,v_M^{\mathrm{llm}}]$ +2: procedure WORDLEVELGROUPING(v, $v^{\mathrm{asr}},\hat{z},v^{\mathrm{llm}})$ +3: Since $v^{\mathrm{asr}}$ is a token sequence represents $\mathbf{v}$ we can easily group it by words: +4: $\pmb{v}_{\mathrm{grouped}}^{\mathrm{asr}}\leftarrow \underbrace{\left[(v_{1}^{\mathrm{asr}},v_{2}^{\mathrm{asr}},v_{3}^{\mathrm{asr}})_{1},(v_{4}^{\mathrm{asr}})_{2},\ldots,(v_{N - 1}^{\mathrm{asr}},v_{N}^{\mathrm{asr}})_{W}\right]}_{\text{word}_1}$ ▷Group $v^{\mathrm{asr}}$ by the words of $\pmb{v}$ +5: With the word-level grouping from $v^{\mathrm{asr}}$ , we can group TASTE embedding $\hat{z}$ as well: +6: $\hat{z}_{\mathrm{grouped}}\gets \left[(\hat{z}_1,\hat{z}_2,\hat{z}_3)_1,(\hat{z}_4)_2,\dots,(\hat{z}_{N - 1},\hat{z}_N)W\right]$ +7: Finally, we can group $v^{\mathrm{llm}}$ following the similar procedure of grouping $v^{\mathrm{asr}}$ . 
+8: $\pmb{v}_{\mathrm{grouped}}^{\mathrm{llm}}\leftarrow \underbrace{\left[(v_{1}^{\mathrm{llm}},v_{2}^{\mathrm{llm}})_{1},(v_{3}^{\mathrm{llm}},v_{4}^{\mathrm{llm}})_{2},\dots,(v_{M - 2}^{\mathrm{llm}},v_{M - 1}^{\mathrm{llm}},v_{M}^{\mathrm{llm}})_{W}\right]}_{\text{word}_2}$ +9: Due to the vocabulary mismatch, the grouping of $v^{\mathrm{llm}}$ is different from $v^{\mathrm{asr}}$ , $\hat{z}_{\mathrm{grouped}}$ +10: end procedure +11: procedure WORDLEVELAVERAGING( $\hat{z}_{\mathrm{grouped}}$ ) +12: $\bar{z}\gets []$ ▷ Initialize a new sequence +13: for word group index $i\gets 1$ to $W$ do +14: word group $(\hat{z}_j,\dots,\hat{z}_k)\gets \hat{z}_{\mathrm{grouped}}[i]$ +15: $\bar{z}_{[j:k]}\gets \text{Average} ((\hat{z}_j,\dots,\hat{z}_k))$ ▷Average the word group +16: append $\bar{z}_{[j:k]}$ to $\bar{z}$ +17: end for +18: Resulting in word-level TASTE embedding $\bar{z}\in \mathbb{R}^{W\times d_z}$ , where $W$ is the word length of $V$ +19: end procedure +20: procedure ALIGNWORDLEVELEMBEDGINGWITHLLM( $\bar{z},v_{\mathrm{grouped}}^{\mathrm{llm}}$ ) +21: $\tilde{z}\gets []$ ▷ Initialize a new sequence +22: for word group index $i\gets 1$ to $W$ do +23: word group $(v_j^{\mathrm{llm}},\dots,v_k^{\mathrm{llm}})\gets v_{\mathrm{grouped}}[i]$ +24: $M\gets \text{Length}((v_j^{\mathrm{llm}},\dots,v_k^{\mathrm{llm}}))$ ▷Get the length of the word group. +25: for $m\gets 1$ to $M$ do ▷add $M\times \bar{z}[i]$ into the aligned sequence $\tilde{z}$ +26: append $\tilde{z}[i]$ to $\tilde{z}$ +27: end for +28: end for +29: end procedure +30: return The LLM-aligned word-level TASTE embedding $\tilde{z}$ and its codes form $\tilde{\boldsymbol{q}}$ + +# A.3 Training Details + +We separate the training process into the two phases: deriving TASTE tokenization and conducting spoken language modeling with TASTE. In the tokenization phase, only the Aggregator, Quantizer, and the UnitDecoder is trainable. We use the Adam optimizer and the learning rate is set to 0.0016. 
The batch size is set to 160 seconds on each of the 8 NVIDIA A6000 GPUs we used. Note that in the first 2 epochs the quantization is not applied. From the beginning of the third epoch, quantization is applied and the Quantizer starts to be updated. We train the TASTE tokenizer for 5 epochs, which takes about 2 days for learning, with the learning rate gradually decayed. + +As for the spoken language modeling training phase, we use the AdamW optimizer, the Cosine scheduler with the learning rate set to 1e-5. We use 8 NVIDIA A6000 GPUs for training. The total batch size summation over the GPUs is set to 768 samples with the gradient accumulation steps set to 2. To reduce the memory overhead and the computational cost, we employ bfloat16 mixed precision during training. Tools such as DeepSpeed [36] and Liger Kernel [13] are also applied to speed up the fine-tuning process. + +# A.4 Evaluation Details + +# A.4.1 Human Evaluation + +We conduct human listening tests through Amazon Mechanical Turk. In each experiment, we randomly select the same 20 samples from each method; and for each sample we collect more than 10 evaluation scores across different human evaluators. + +MUSHRA In Table 1, we have shown our result of the MUSHRA human listening test [40]. Following [51], we conduct the evaluation with a hidden reference but without a low-pass-filtered anchor. We instruct evaluators to rate the perceptual quality of the given samples with respect to the ground truth on a scale of 1 to 100. + +Speech Continuation MOS In Table 2, we mention that we have conducted the human listening test to evaluate the overall performance of the speech continuations. Here, we present the instruction for human speech continuation MOS evaluation as follows: + +# Instruction for Human Speech Continuation MOS Evaluation + +In this test, each sample will contain a short audio clip called "prompt" (3 seconds) and a longer audio clip called "prompt+continuation" ( $\sim$ 15 seconds). 
+ +You will be asked to rate the speech quality of the "prompt+continuation" audio clip, specifically focus on the "continuation" part. + +The rating should be based on how likely you think that the long audio is a proper continuation of the "prompt" audio. + +Specifically, the rating should be based on the following scale: + +1: Bad - The "continuation" is not distinguishable or not natural. +2: Poor - The "continuation" is $25\%$ distinguishable. +3: Fair - The "continuation" is $50\%$ distinguishable and natural. +4: Good - The "continuation" is $75\%$ distinguishable and natural. +5: Excellent - The "continuation" is distinguishable, meaningful, and natural. + +Distinguishable means that the words in the "continuation" is distinguishable. + +Natural means that the "continuation" sounds like a real human voice and a natural continuation of the prompt without considering the content of the speech. + +Meaningful means that you can not only distinguish the words but also understand the meaning of the whole "prompt+continuation". + +# A.4.2 GPT-4o for MOS Evaluation + +As introduced in Section 4.2.1, we use GPT-4o to assign MOS scores to the speech continuation results. Here, we describe the detailed procedure. First, whisper-large-v3 is applied to transcribe the generated speech. Then, given the transcription, the text content from the prompt audio, and the instruction template, GPT-4o can produce a score between 1 and 5. The instruction template is provided below: + +# Instruction Prompt for GPT-4o MOS Evaluation + +The task is evaluating the relevance and likelihood of the predicted text continuation, given the text prompt. You should also consider whether the meaning of the text continuation is making sense. The text prompt is: + +{"prompt"} + +, and the text continuation is : + +{"content"} + +You must give an overall rating from 1 to 5. The rating guideline is as below: + +1: The text continuation is very unlikely and irrelevant to the text prompt. 
+ +2: The text continuation is unlikely and marginally relevant to the text prompt. + +3: The text continuation is moderately likely and relevant to the text prompt. + +4: The text continuation is likely and relevant to the text prompt. + +5: The text continuation is very likely and highly relevant. + +You should take the following steps to provide the score: + +First: briefly analyze the sample with the above definition. + +Second: MUST follow the output format as: I would rate the score as _ + +# A.5 Additional Results + +# A.5.1 Details on SALMON and StoryCloze + +Our detailed results on SALMON and StoryCloze are reported in Table 4. The introductions of the two benchmarks—SALMON and StoryCloze—are described below. + +SALMON for Acoustic Evaluation SALMON offers a comprehensive set of metrics designed to evaluate SLMs in multiple dimensions. In summary, each test sample consists of a positive sample and a negative sample. The negative sample differs from the positive sample by having some segments altered. These alterations include changes in speaker, gender, environment (e.g., room acoustics), or sentiment in the middle of the utterance. The SLM serves as an anomaly detector that aims to distinguish between the pairs of positive and negative samples. The distinction is based on + +Table 4: The evaluation results on SALMON and StoryCloze of different SLMs, and BG means background. We report likelihood-based accuracy on SALMON (acoustic aspect) and StoryCloze (semantic aspect). The baseline (S3 token) is conducted by joint speech-text modeling with the S3 token as speech tokenization. + +
METHODLoRASALMON (ACOUSTIC CONSISTENCY)STORYCLOZE
SentimentSpeakerGenderRoomBG (domain)BG (rand.)sSC / tSC
Previous Work
TWIST 1.3B ([11])X61.5±3.469.0±3.369.5±3.359.0±3.555.5±3.560.5±3.552.4±0.8 / 70.6±0.7
TWIST 7B ([11])X61.5±3.471.0±3.270.0±3.262.0±3.455.5±3.560.5±3.555.3±0.8 / 74.1±0.7
Spirit LM ([33])X54.5±3.569.5±3.367.0±3.354.5±3.553.5±3.555.5±3.561.0±0.8 / 82.9±0.6
Spirit LM Expr. ([33])X73.5±3.181.0±2.885.0±2.554.5±3.556.0±3.564.0±3.456.9±0.8 / 75.4±0.7
Ours
Baseline (S3 token)49.5±3.548.8±3.548.8±3.549.5±3.555.3±3.549.5±3.554.4±0.8 / 63.0±0.8
TASLM 1B (token)59.0±3.568.0±3.370.5±3.261.0±3.452.0±3.554.0±3.564.2±0.8 / 88.9±0.5
TASLM 1B (embedding)57.5±3.567.0±3.375.5±3.050.0±3.547.0±3.549.0±3.564.0±0.8 / 89.5±0.5
+ +the likelihood score given by each SLM, which is then evaluated with the overall precision between the ground truth and the prediction. + +StoryCloze for Semantic Evaluation To evaluate the SLMs' ability to comprehend semantic coherence and logical reasoning, we employ the spoken version of StoryCloze test (sSC) and the Topic StoryCloze test (tSC) assembled by ([11]). Assessment of narrative understanding involves presenting a four-sentence story setup, followed by two possible endings. These tasks require the model to select the most appropriate conclusion, thereby testing its grasp of causal and temporal relationships within a narrative. Similarly to SALMON, we measure the accuracy of the distinctions based on the likelihood scores. + +# A.5.2 Report of Standard Deviations + +We report the standard deviations of our tables in the main text to allow further investigation. + +Table 5: Results with standard deviations of Table 1 + +
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%±0.074.09±0.323.84±0.26---76.6±15.9
Encodec [5]15005.1%±0.111.58±0.343.26±0.243.46±0.280.94±0.0030.63±0.10-
30002.6%±0.082.35±0.533.48±0.253.81±0.270.96±0.0020.78±0.0725.6±18.6
SpeechTokenizer [51]5005.2%±0.111.27±0.052.99±0.172.80±0.240.94±0.0030.35±0.09-
20003.0%±0.083.56±0.433.60±0.283.65±0.220.97±0.0020.80±0.0653.9±22.9
40002.5%±0.083.90±0.363.76±0.284.03±0.170.98±0.0020.92±0.04-
Mimi [6]10003.1%±0.093.60±0.373.60±0.303.62±0.260.96±0.0020.82±0.0667.6±19.8
S3 token (topline) [7]6003.0%±0.094.18±0.273.90±0.243.30±0.260.96±0.0020.82±0.0970.2±17.0
Text-only (baseline)~505.9%±0.114.31±0.164.11±0.222.44±0.230.57±0.0060.78±0.0942.6±27.1
TASTE (ours)~1504.4%±0.114.29±0.184.10±0.223.05±0.260.91±0.0030.80±0.0868.3±17.1
+ +Table 6: Results with standard deviations of Table 2. + +
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.15±1.274.25±0.224.00±1.28---
Cascade (LLaMA2-7Bβ)-3.43±1.274.25±0.253.98±1.29---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.48±0.703.25±0.481.95±1.0162.5±1.461.5±0.562.0±0.7
TWIST 7B [11]7B / 7Bγ1.44±0.703.27±0.522.04±0.9163.4±1.464.7±0.564.1±0.7
Spirit LM [33]7B / 7Bβ2.79±1.063.41±0.192.38±0.8159.1±1.472.0±0.565.6±0.7
Spirit LM Expr. [33]7B / 7Bβ1.90±1.033.40±0.302.41±0.9669.0±1.366.2±0.567.6±0.7
Baseline (S3 token)45M / 1.3Bα1.37±0.874.04±0.272.84±1.1150.2±1.458.7±0.654.5±0.8
TASLM 1B (token)45M / 1.3Bα3.08±1.374.07±0.283.93±1.3060.8±1.476.5±0.568.7±0.7
TASLM 1B (embed.)45M / 1.3Bα3.16±1.334.22±0.214.16±1.2057.7±1.476.7±0.567.2±0.7
+ +Base models: $\alpha$ LLaMA3.2-1B, $\beta$ LLaMA2-7B, $\gamma$ LLaMA-7B, $\theta$ OPT-1.3B + +Table 7: Results with standard deviations of Table 3. + +
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.3±0.939.0±2.8
Mini-Omni 0.5B [46]T+A4.5±0.511.6±1.8
Helium 7B (text)T32.3±1.075.0±2.5
Moshi 7B [6]T+A26.6±1.062.3±2.8
LLaMA3.1-8B-InstructT60.4±1.171.7±2.6
Llama-Omni-8B [9]T+A35.5±1.167.3±2.7
LLaMA3.2-1B†T24.0±0.951.0±2.9
TASLM 1B (embed.)†T+A27.1±1.057.6±2.9
+ +We apply few-shot learning to facilitate question answering. \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07053/images/082bb6aa5d9a46479ef69e67a1c255233b2ead7447bd21827e559bc7215a53a7.jpg b/data/2025/2504_07xxx/2504.07053/images/082bb6aa5d9a46479ef69e67a1c255233b2ead7447bd21827e559bc7215a53a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cbb3114ac4e94b2441e71a86b3a5c4556ed2aaba --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/082bb6aa5d9a46479ef69e67a1c255233b2ead7447bd21827e559bc7215a53a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f267047109e9a1a40b9ea6e093a48ca4dac3f82e9eeae3038aef6dfacf2800ce +size 86250 diff --git a/data/2025/2504_07xxx/2504.07053/images/13b95a11153c7dc0abb384977a8b4f13f880ecdc763b376f421e3948b99c6a57.jpg b/data/2025/2504_07xxx/2504.07053/images/13b95a11153c7dc0abb384977a8b4f13f880ecdc763b376f421e3948b99c6a57.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30175ca407d0457b762b750e5af3610d3ba83397 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/13b95a11153c7dc0abb384977a8b4f13f880ecdc763b376f421e3948b99c6a57.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b8f4269513c0efe42b369e098ff26ce1562d22bf1e8e3666b515dafeacc97c06 +size 4137 diff --git a/data/2025/2504_07xxx/2504.07053/images/1a8ccabf79ad97b3b3d47d64ea800c7d4361d2c185e0e4c15151ce2ffc4f4b4e.jpg b/data/2025/2504_07xxx/2504.07053/images/1a8ccabf79ad97b3b3d47d64ea800c7d4361d2c185e0e4c15151ce2ffc4f4b4e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..89b69f4f38f0e57ee13b9bbd3bafa6ba4e238873 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/1a8ccabf79ad97b3b3d47d64ea800c7d4361d2c185e0e4c15151ce2ffc4f4b4e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5aeb33e9b01a77cbf459130803a0c86c6eee3f5c68954495731b4c4478b4af1 +size 43888 diff --git 
a/data/2025/2504_07xxx/2504.07053/images/1b178f39d74600bee181610c32c18474ca8adc9c9bfb2aa49ac9649a63e89cf4.jpg b/data/2025/2504_07xxx/2504.07053/images/1b178f39d74600bee181610c32c18474ca8adc9c9bfb2aa49ac9649a63e89cf4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..724c43b438d76ae64207f680209136cc615e8a27 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/1b178f39d74600bee181610c32c18474ca8adc9c9bfb2aa49ac9649a63e89cf4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d45a220fa9baf280579fb5542ec7a85ccf51bbb11f7544aa4fa4724d8aad8b9 +size 91573 diff --git a/data/2025/2504_07xxx/2504.07053/images/1ec9832577223e0e450fbaf505a535d7ac71357bf1e0ccf1924ef8ca6ebe0e86.jpg b/data/2025/2504_07xxx/2504.07053/images/1ec9832577223e0e450fbaf505a535d7ac71357bf1e0ccf1924ef8ca6ebe0e86.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5c23dbb2111bf41406848d6892d8cac17daad737 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/1ec9832577223e0e450fbaf505a535d7ac71357bf1e0ccf1924ef8ca6ebe0e86.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce23459771ff52b966f3e1922ce08ab12ca0623d347f91061fe2aadfc52496c6 +size 62478 diff --git a/data/2025/2504_07xxx/2504.07053/images/2f0bfffa21da24cd7b583028f31b507e7681918acc4a32cdc33e9ee74b4bd996.jpg b/data/2025/2504_07xxx/2504.07053/images/2f0bfffa21da24cd7b583028f31b507e7681918acc4a32cdc33e9ee74b4bd996.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a967e97ee6aa8988de8f0f91d7f07315d86008ac --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/2f0bfffa21da24cd7b583028f31b507e7681918acc4a32cdc33e9ee74b4bd996.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3d1c6037474d9a4554a9a7e945cd9a10015826dc59267b08662309c924e2c79e +size 34594 diff --git a/data/2025/2504_07xxx/2504.07053/images/30ea210718cf295d11ed251b2e0c4a61d62f09e7adfa8b646b7f51c353bd4ce3.jpg 
b/data/2025/2504_07xxx/2504.07053/images/30ea210718cf295d11ed251b2e0c4a61d62f09e7adfa8b646b7f51c353bd4ce3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f582f16183f2a5727095ffc1a1e20b8fe42470c9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/30ea210718cf295d11ed251b2e0c4a61d62f09e7adfa8b646b7f51c353bd4ce3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6cd16d6735400dbc296d03114150f77e011c2883b1dd09ed56eb53fa5732c57 +size 10289 diff --git a/data/2025/2504_07xxx/2504.07053/images/3427cad281a5439a9f7df10b83a4b3dac65abff326e4200bd70cb65812769ba6.jpg b/data/2025/2504_07xxx/2504.07053/images/3427cad281a5439a9f7df10b83a4b3dac65abff326e4200bd70cb65812769ba6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a01c38d0214b0be8d7d660a673e0e24037ba372 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/3427cad281a5439a9f7df10b83a4b3dac65abff326e4200bd70cb65812769ba6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4e455c54124cd4954734ebce1bf9107469e1c70867f8a668d20a8dc775a659c +size 38291 diff --git a/data/2025/2504_07xxx/2504.07053/images/3518462fe4b87eddccc916570b8c32f6eac7057ace9f6c5e868cb006d8238a4b.jpg b/data/2025/2504_07xxx/2504.07053/images/3518462fe4b87eddccc916570b8c32f6eac7057ace9f6c5e868cb006d8238a4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57f23fe3624df96a2cd95952dbfb111a193f5879 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/3518462fe4b87eddccc916570b8c32f6eac7057ace9f6c5e868cb006d8238a4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:658c1f324dd7fc69c25d80363d0e71a469e5b8ca87a2bf1011196a95f4180ed0 +size 5348 diff --git a/data/2025/2504_07xxx/2504.07053/images/351f99d6838a20a4a0f4f2012a1716f5e56d7d8d0d6345456f74c0efd8ea496c.jpg b/data/2025/2504_07xxx/2504.07053/images/351f99d6838a20a4a0f4f2012a1716f5e56d7d8d0d6345456f74c0efd8ea496c.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b5efae125bac8af59ecbbfa4bfd2e28c2abc4814 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/351f99d6838a20a4a0f4f2012a1716f5e56d7d8d0d6345456f74c0efd8ea496c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:aac846aaa5c68f887f355c9546ea2936571a25951ef6af16a50931959d145d2a +size 10037 diff --git a/data/2025/2504_07xxx/2504.07053/images/44030a4630031b1d50e7d5d616d615392e6364abb0724b6e1e1ab0c52d670e50.jpg b/data/2025/2504_07xxx/2504.07053/images/44030a4630031b1d50e7d5d616d615392e6364abb0724b6e1e1ab0c52d670e50.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0db65a26634bde76ddf4fa15175ab3a120501075 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/44030a4630031b1d50e7d5d616d615392e6364abb0724b6e1e1ab0c52d670e50.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fef2b7c887e75e6677319b4199f579476e87f9c5e2b35e5e689ffa10d546b41c +size 6016 diff --git a/data/2025/2504_07xxx/2504.07053/images/46c84b75462a0de2c6f9d19b33a64cae57a50c650d7f2f43f7c1867ad55d4557.jpg b/data/2025/2504_07xxx/2504.07053/images/46c84b75462a0de2c6f9d19b33a64cae57a50c650d7f2f43f7c1867ad55d4557.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9c7f2be301633e32ead31b73ad036dc7faca9bf --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/46c84b75462a0de2c6f9d19b33a64cae57a50c650d7f2f43f7c1867ad55d4557.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d7b81cdcb0cca6b1ff08e366acf5cf3288e107105bdcac117f919eefd1fd9b9 +size 16895 diff --git a/data/2025/2504_07xxx/2504.07053/images/651f9ed6ac53046731d857cdd912265b0ec0c53f90e6424c0a888885260e44bf.jpg b/data/2025/2504_07xxx/2504.07053/images/651f9ed6ac53046731d857cdd912265b0ec0c53f90e6424c0a888885260e44bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6d34fa12f01681093e5e228cc8ab49fb8adefb6a --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07053/images/651f9ed6ac53046731d857cdd912265b0ec0c53f90e6424c0a888885260e44bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:442946ab0d7540e8e1ce8c8ec3e10855b8452deb324fd9f53f5034d3435839a5 +size 10165 diff --git a/data/2025/2504_07xxx/2504.07053/images/6f422c78972733a9c1c7937597b9b2c0305d8d702292093ed6726cb8c91118e6.jpg b/data/2025/2504_07xxx/2504.07053/images/6f422c78972733a9c1c7937597b9b2c0305d8d702292093ed6726cb8c91118e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1fdcc466bd056f48260c4345f974f158262c0830 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/6f422c78972733a9c1c7937597b9b2c0305d8d702292093ed6726cb8c91118e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c1d12c7f5bbaceca4d2e1a578b423562a7f5256384d4de279a230111baf9a188 +size 3207 diff --git a/data/2025/2504_07xxx/2504.07053/images/81e12a75d5eaba0cef27031e19cf6686815234b8941b4228e622e8d418018a9b.jpg b/data/2025/2504_07xxx/2504.07053/images/81e12a75d5eaba0cef27031e19cf6686815234b8941b4228e622e8d418018a9b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..afc9debefb7badd8d30f705e3a8bd6238e95f090 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/81e12a75d5eaba0cef27031e19cf6686815234b8941b4228e622e8d418018a9b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89c0c3624ef3fc6d7aba83cfa5cf56755f8da8cad0f4651555b4645bd0ef6dac +size 50495 diff --git a/data/2025/2504_07xxx/2504.07053/images/8d340474064b30ca59c86aab174a8fe3962c8cd0fc819ba0a06bd76498b8005d.jpg b/data/2025/2504_07xxx/2504.07053/images/8d340474064b30ca59c86aab174a8fe3962c8cd0fc819ba0a06bd76498b8005d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7bb50dc73ea00fd1243be1a1e19aac6dc0502fe5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/8d340474064b30ca59c86aab174a8fe3962c8cd0fc819ba0a06bd76498b8005d.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:fb0903c38d487728f1ff1a6a830575561da56b81b07574a0711fe60343c64d8a +size 9031 diff --git a/data/2025/2504_07xxx/2504.07053/images/bbdeb50fc600f45f77d15dcc3268b7b60b13f61b3c78d617a5b712289fd9f4c2.jpg b/data/2025/2504_07xxx/2504.07053/images/bbdeb50fc600f45f77d15dcc3268b7b60b13f61b3c78d617a5b712289fd9f4c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..775a2e9b7e62d4f2ab8acdce3f3089a79a8d9c92 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/bbdeb50fc600f45f77d15dcc3268b7b60b13f61b3c78d617a5b712289fd9f4c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f3743f5446f38348db55e4b4a3a2d3c9f7ee90f3893cabc1c352ca729dc0a64b +size 12028 diff --git a/data/2025/2504_07xxx/2504.07053/images/c37da4e246327bdeb5e0364e3b1c15c3f51d38c47be643ae90b43b1c67a0812a.jpg b/data/2025/2504_07xxx/2504.07053/images/c37da4e246327bdeb5e0364e3b1c15c3f51d38c47be643ae90b43b1c67a0812a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9cc948ab27832178c4f57e94612e89ee99411c29 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/c37da4e246327bdeb5e0364e3b1c15c3f51d38c47be643ae90b43b1c67a0812a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1433347ed11b7a30cafa032e9d384ad60bafe25002037271ff35b825729819ae +size 114424 diff --git a/data/2025/2504_07xxx/2504.07053/images/c6d5132251cd60884addea1cc932e94c730e578aeb4eca458f8ca4823797f9a7.jpg b/data/2025/2504_07xxx/2504.07053/images/c6d5132251cd60884addea1cc932e94c730e578aeb4eca458f8ca4823797f9a7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..845aea70d2e36be7e84489a8e59ca87fb5c2b13f --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/c6d5132251cd60884addea1cc932e94c730e578aeb4eca458f8ca4823797f9a7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fefb1f56cf6f7a2f1d0520c083b5398aa4b24145fb117303227836d1b7e76d8d +size 34215 diff --git 
a/data/2025/2504_07xxx/2504.07053/images/cd044b89abcba5b2d464085ca59cc04326658efa54fca2b094bce5b36bb2d8b4.jpg b/data/2025/2504_07xxx/2504.07053/images/cd044b89abcba5b2d464085ca59cc04326658efa54fca2b094bce5b36bb2d8b4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4e9bed5a160e3c4a488550f34da6b8397e641b0 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/cd044b89abcba5b2d464085ca59cc04326658efa54fca2b094bce5b36bb2d8b4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51233519f90e44b4c1be333604577d84ca6b13eb9466db8d61a63f4feb2fc989 +size 84594 diff --git a/data/2025/2504_07xxx/2504.07053/images/e45e9d10721807dc88752e032a3bc967c5e37ba7fac2be48a2c5ec4780a4ab95.jpg b/data/2025/2504_07xxx/2504.07053/images/e45e9d10721807dc88752e032a3bc967c5e37ba7fac2be48a2c5ec4780a4ab95.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03e96b92f1d44264768d5d8247cb405925107d1e --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/e45e9d10721807dc88752e032a3bc967c5e37ba7fac2be48a2c5ec4780a4ab95.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:acefcab185dfbfd69ef800d3ea90cb80aad2a271eda89b395320b2756e3d4a01 +size 78832 diff --git a/data/2025/2504_07xxx/2504.07053/images/f32cb7408ec6859e010ad192990d938cfe29c53a64332a99b999f3b23eef07cd.jpg b/data/2025/2504_07xxx/2504.07053/images/f32cb7408ec6859e010ad192990d938cfe29c53a64332a99b999f3b23eef07cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2bda6bd31d1227aba4bc10e1662453a64fdf8877 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/f32cb7408ec6859e010ad192990d938cfe29c53a64332a99b999f3b23eef07cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:92fc4dc9db43f513b6ada1a0b4273d7e83bd48ea4fdc237022ffc8144deb3fdf +size 12329 diff --git a/data/2025/2504_07xxx/2504.07053/images/f8b51d6f60775b33dc604782b7f7194279a4a2edc6e0140367c70afe7225af4c.jpg 
b/data/2025/2504_07xxx/2504.07053/images/f8b51d6f60775b33dc604782b7f7194279a4a2edc6e0140367c70afe7225af4c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..af122781fc5aa8a74fb3e5a6fd5e19561c356d08 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/images/f8b51d6f60775b33dc604782b7f7194279a4a2edc6e0140367c70afe7225af4c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b87a3975a07e6a205500b02db9fe12600ebe4088a3ea6c993ec471019da10bef +size 7881 diff --git a/data/2025/2504_07xxx/2504.07053/layout.json b/data/2025/2504_07xxx/2504.07053/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0ea678202b69a3911ad8c2743f24d49bb52f0732 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07053/layout.json @@ -0,0 +1,11781 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 130, + 96, + 479, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 96, + 479, + 138 + ], + "spans": [ + { + "bbox": [ + 130, + 96, + 479, + 138 + ], + "type": "text", + "content": "TASTE: Text-Aligned Speech Tokenization and Embedding for Spoken Language Modeling" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 177, + 496, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 177, + 496, + 192 + ], + "spans": [ + { + "bbox": [ + 116, + 177, + 496, + 192 + ], + "type": "text", + "content": "Liang-Hsuan Tseng\\*23 Yi-Chang Chen\\*1 Kuan-Yi Lee\\*23 Da-Shan Shiu\\*1 Hung-yi Lee\\*3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 216, + 201, + 394, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 216, + 201, + 394, + 213 + ], + "spans": [ + { + "bbox": [ + 216, + 201, + 394, + 213 + ], + "type": "text", + "content": "*Equal contribution 1MediaTek Research" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 172, + 213, + 437, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 172, + 213, + 437, + 225 + ], + 
"spans": [ + { + "bbox": [ + 172, + 213, + 437, + 225 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 172, + 213, + 437, + 225 + ], + "type": "text", + "content": "Internship at MediaTek Research " + }, + { + "bbox": [ + 172, + 213, + 437, + 225 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 172, + 213, + 437, + 225 + ], + "type": "text", + "content": "National Taiwan University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 200, + 235, + 411, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 235, + 411, + 246 + ], + "spans": [ + { + "bbox": [ + 200, + 235, + 411, + 246 + ], + "type": "text", + "content": "{yi-chang.chen, ds.shiu}@mtkresearch.com" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 189, + 247, + 421, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 247, + 421, + 257 + ], + "spans": [ + { + "bbox": [ + 189, + 247, + 421, + 257 + ], + "type": "text", + "content": "{f11921067,b10901091,hungyilee}@ntu.edu.tw" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 281, + 285, + 329, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 285, + 329, + 298 + ], + "spans": [ + { + "bbox": [ + 281, + 285, + 329, + 298 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 310, + 470, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 310, + 470, + 519 + ], + "spans": [ + { + "bbox": [ + 140, + 310, + 470, + 519 + ], + "type": "text", + "content": "Recent efforts target spoken language models (SLMs) that not only listen but also speak for more natural human-LLM interaction. Joint speech-text modeling is a promising direction to achieve this. However, the effectiveness of recent speech tokens for joint modeling remains underexplored. 
To address this, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a method that directly addresses the modality gap by aligning speech token with the corresponding text transcription during the tokenization stage. We propose a method that can achieve this through a attention-based aggregation mechanism and with speech reconstruction as the training objective. We conduct extensive experiments and show that TASTE can preserve essential paralinguistic information while dramatically reducing the token sequence length. With TASTE, we perform straightforward joint spoken language modeling by using Low-Rank Adaptation on the pre-trained text LLM. Experimental results show that TASTE-based SLMs perform comparable to previous work on SALMON and StoryCloze; while significantly outperform other pre-trained SLMs on speech continuation across subjective and objective evaluations. To our knowledge, TASTE is the first end-to-end approach that utilizes a reconstruction objective to automatically learn a text-aligned speech tokenization and embedding suitable for spoken language modeling. Our demo, code, and model are available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 540, + 192, + 553 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 540, + 192, + 553 + ], + "spans": [ + { + "bbox": [ + 105, + 540, + 192, + 553 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 565, + 506, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 565, + 506, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 565, + 506, + 654 + ], + "type": "text", + "content": "Learning a speech representation suitable for universal speech-processing tasks has long been a significant challenge [31, 42, 48]. 
Unlike text—which can be encoded discretely [18, 19, 39]—speech is a continuous waveform carrying layered information (acoustic, semantic, prosodic, etc.). Recent neural self-supervised learning (SSL) methods move beyond filter-banks and MFCCs to encode raw audio into compact, high-dimensional embeddings that excel on discriminative tasks such as automatic speech recognition (ASR), emotion recognition, and speaker verification. Despite these gains, learning representations for generative speech tasks remains an open and more complex problem that has begun to attract focused attention [17, 28, 44, 45]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 506, + 715 + ], + "type": "text", + "content": "Among the generative speech tasks, spoken language modeling (SLM) is an intriguing direction, aiming to create models that can not only listen but also speak. Typically, building an SLM requires two stages: first, deriving speech tokenizations; second, training a language model on the speech tokens. For the speech tokens, previous approaches either apply SSL-based representations following by discretization techniques [11, 14, 21, 32] or reuse units from neural codec models like EnCodec" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 205, + 37, + 560 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 205, + 37, + 560 + ], + "spans": [ + { + "bbox": [ + 14, + 205, + 37, + 560 + ], + "type": "text", + "content": "arXiv:2504.07053v2 [cs.CL] 22 May 2025" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "type": "text", + "content": "Preprint. 
Under review." + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 71, + 233, + 164 + ], + "blocks": [ + { + "bbox": [ + 108, + 71, + 233, + 164 + ], + "lines": [ + { + "bbox": [ + 108, + 71, + 233, + 164 + ], + "spans": [ + { + "bbox": [ + 108, + 71, + 233, + 164 + ], + "type": "image", + "image_path": "46c84b75462a0de2c6f9d19b33a64cae57a50c650d7f2f43f7c1867ad55d4557.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 171, + 507, + 216 + ], + "lines": [ + { + "bbox": [ + 104, + 171, + 507, + 216 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 507, + 216 + ], + "type": "text", + "content": "Figure 1: The concept overview. Conventional methods extract speech tokens solely from speech, inducing length-mismatch problem when conducting joint speech-text modeling. By taking dual modalities as input, we generate speech tokenization that is aligned with text, facilitating straightforward and effective joint modeling." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 238, + 71, + 503, + 164 + ], + "blocks": [ + { + "bbox": [ + 238, + 71, + 503, + 164 + ], + "lines": [ + { + "bbox": [ + 238, + 71, + 503, + 164 + ], + "spans": [ + { + "bbox": [ + 238, + 71, + 503, + 164 + ], + "type": "image", + "image_path": "c6d5132251cd60884addea1cc932e94c730e578aeb4eca458f8ca4823797f9a7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": "and SoundStream [5, 20, 41, 49]. 
Although autoregressive modeling with these speech tokens shows great potential in text-to-speech (TTS) [45, 47], previous SLMs that model only speech tokens [21, 32] have been shown to lack semantic fidelity [22]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 285, + 504, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 504, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 504, + 373 + ], + "type": "text", + "content": "To bridge this gap, one promising direction is to utilize text—which is rich in semantic—during spoken language modeling. TWIST [11] shows that SLMs can benefit from initializing with text LLMs. More recent work often conducts joint speech-text modeling on tokens of both modalities to facilitate the semantic coherence on the generated speech [6, 9, 33, 46]. Yet integrating text and speech tokens introduces a length-mismatch challenge, as speech token sequences are usually longer than their text counterparts. Common remedies may include interleaving speech and text tokens [33] or inserting padding to synchronize sequence lengths between modalities [6, 9, 46]. However, these methods require either additional speech-text alignment or heuristic rules to enable joint modeling." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 378, + 506, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 489 + ], + "type": "text", + "content": "In this work, we introduce Text-Aligned Speech Tokenization and Embedding (TASTE), a special type of speech tokenization tailored for speech-text joint spoken language modeling. By acknowledging that the length mismatch introduces additional complexity in joint modeling, we develop our speech token to be aligned with its corresponding text transcription tokens. 
To achieve this, we first obtain the textual transcription of a speech with the ASR model; then we derive the speech token based on the transcription through a specialized cross-attention mechanism for speech reconstruction. Note that the full process can be accomplished in an end-to-end manner, with no explicit speech-text alignment required. Unlike previous speech tokens that are developed under a fixed stride with fixed down-sampling rate, our speech token has dynamic frequency as it is text-aligned. Figure 1 shows an overall concept of TASTE, illustrating how our tokenization allows straightforward joint modeling." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "type": "text", + "content": "To evaluate the effectiveness of TASTE, we first conduct extensive experiments on speech reconstruction. Our results on LibriSpeech [34] show that TASTE not only resynthesizes speech in high quality, but also retains similarity to the original speech. TASTE achieves high-end reconstruction at an extremely low bit rate (" + }, + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "type": "inline_equation", + "content": "\\sim 150" + }, + { + "bbox": [ + 104, + 492, + 504, + 581 + ], + "type": "text", + "content": " bps); while the other comparable methods are often more than thousands of bps. More intriguingly, we demonstrate that TASTE allows simple text-aligned speech editing. By exchanging the partial text-aligned speech tokens from two different utterances with the same content, we demonstrate that the paralinguistic information such as duration and tone can be exchanged precisely following the words being exchanged, resulting in natural edited speech." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 585, + 506, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 585, + 506, + 695 + ], + "spans": [ + { + "bbox": [ + 104, + 585, + 506, + 695 + ], + "type": "text", + "content": "On the other hand, we demonstrate that TASTE successfully allows effective spoken language modeling. We perform straightforward joint modeling with TASTE under Low-Rank Adaptation [15]. We first perform speech continuation experiments with 3-second speech prompts given. The evaluation is three-fold. We use GPT-4o for evaluating the semantic aspect; UTMOS [38] for the acoustic aspect; and the human listening test for the general evaluation. Results show that our SLMs not only generate natural, meaningful speech continuations, but also outperforms the other 7B pre-trained SLMs across all the continuation evaluation aspects with 1.3B parameters. We also evaluate our SLMs on two benchmarks, SALMON [25] and StoryCloze [11] and our results show that our SLMs achieve comparable performance compared to the other speech-text joint modeling methods. Moreover, we show that our pretrained SLM can perform spoken question answering under few-shot scenario." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 506, + 723 + ], + "type": "text", + "content": "In summary, we derive TASTE, a text-aligned speech tokenization that allows effective joint speech-text spoken language modeling. 
By aligning the speech tokenization with its text counterpart during" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 76, + 500, + 216 + ], + "blocks": [ + { + "bbox": [ + 108, + 76, + 500, + 216 + ], + "lines": [ + { + "bbox": [ + 108, + 76, + 500, + 216 + ], + "spans": [ + { + "bbox": [ + 108, + 76, + 500, + 216 + ], + "type": "image", + "image_path": "1ec9832577223e0e450fbaf505a535d7ac71357bf1e0ccf1924ef8ca6ebe0e86.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 224, + 504, + 269 + ], + "lines": [ + { + "bbox": [ + 104, + 224, + 504, + 269 + ], + "spans": [ + { + "bbox": [ + 104, + 224, + 504, + 269 + ], + "type": "text", + "content": "Figure 2: The overall framework of our text-aligned speech tokenization and embedding. The left side illustrate the process of obtaining the TASTE tokenization " + }, + { + "bbox": [ + 104, + 224, + 504, + 269 + ], + "type": "inline_equation", + "content": "\\hat{z}" + }, + { + "bbox": [ + 104, + 224, + 504, + 269 + ], + "type": "text", + "content": ", detailed in Section 2.1.1; while the right side demonstrate how we reconstruct the speech with TASTE (Section 2.1.2). The training objective for our speech reconstruction is discussed in Section 2.1.3." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 290, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 290, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 290, + 504, + 335 + ], + "type": "text", + "content": "the tokenization stage, TASTE enables straightforward modeling. To our best knowledge, we are the first one to utilize the reconstruction objective to automatically derive a text-aligned speech tokenization and embedding that is suitable for joint speech-text spoken language modeling. Our demo is available at https://mtkresearch.github.io/TASTE-SpokenLM.github.io." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 350, + 167, + 361 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 167, + 361 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 167, + 361 + ], + "type": "text", + "content": "2 Method" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 374, + 506, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 374, + 506, + 409 + ], + "spans": [ + { + "bbox": [ + 104, + 374, + 506, + 409 + ], + "type": "text", + "content": "We propose text-aligned speech tokenization and embedding (TASTE) to facilitate effective joint speech-text spoken language modeling. Here, we first introduce how we derive our tokenization—TASTE—in Section 2.1, and then discuss how we use TASTE for spoken language modeling (§ 2.2)." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 421, + 201, + 432 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 421, + 201, + 432 + ], + "spans": [ + { + "bbox": [ + 105, + 421, + 201, + 432 + ], + "type": "text", + "content": "2.1 Building TASTE" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": "As depicted in Figure 2, TASTE is comprised of the two main components: the text-aligned speech tokenizer (" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": " 2.1.1) that produces the text-aligned speech tokenization; and the speech decoder (" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 104, + 441, + 504, + 486 + ], + "type": "text", + "content": " 2.1.2) to reconstruct speech based on the text token and the TASTE speech token aligned with it. The training objective of speech reconstruction is described in Section 2.1.3." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 497, + 247, + 509 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 497, + 247, + 509 + ], + "spans": [ + { + "bbox": [ + 105, + 497, + 247, + 509 + ], + "type": "text", + "content": "2.1.1 TASTE Speech Tokenizer" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "text", + "content": "In TASTE, the speech tokenizer, denoted as Tokenizer(\\cdot), is designed to generate the text-aligned speech tokenization and embedding with the speech-text pair " + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "inline_equation", + "content": "X = (u, v)" + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "text", + "content": " taken as input, where " + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "text", + "content": " represents the textual transcription of the speech utterance " + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "text", + "content": ", which can be easily obtained through an automatic speech recognition (ASR) system. Recent developments in robust and efficient ASR ([10, 35]) allow us to focus on discussing how to derive the text-aligned speech token effectively by assuming that " + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 516, + 504, + 594 + ], + "type": "text", + "content": " is of sufficient quality. The TASTE speech tokenizer is composed of three major components: an encoder, an aggregator, and a quantizer." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "spans": [ + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "content": "The encoder Encoder " + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "inline_equation", + "content": "(\\cdot)" + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "content": " contains " + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "content": " layers of Transformer ([43]) encoder blocks and is used to extract high-dimensional speech representation. We employ the pre-trained Whisper ASR encoder [35] as our speech encoder, and it is frozen during training. For an input speech utterance " + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "inline_equation", + "content": "\\pmb{u}" + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "content": ", the encoder produces a sequence of hidden states from each layer " + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "inline_equation", + "content": "[h^{(1)}, h^{(2)}, \\dots, h^{(L)}]" + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "content": ". 
In our experiments, we retain the last hidden layer representation " + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "inline_equation", + "content": "h^{(L)}" + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "content": " and the shallow representation " + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "inline_equation", + "content": "h^{(l)}" + }, + { + "bbox": [ + 104, + 597, + 505, + 667 + ], + "type": "text", + "content": " from the first half of the hidden representations of the encoder for later usage, denoted as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 200, + 680, + 409, + 696 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 200, + 680, + 409, + 696 + ], + "spans": [ + { + "bbox": [ + 200, + 680, + 409, + 696 + ], + "type": "interline_equation", + "content": "\\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)} = \\operatorname {E n c o d e r} (\\boldsymbol {u}), \\quad \\text {w h e r e} 1 \\leq l \\leq \\left\\lfloor \\frac {L}{2} \\right\\rfloor .", + "image_path": "44030a4630031b1d50e7d5d616d615392e6364abb0724b6e1e1ab0c52d670e50.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "type": "text", + "content": "Note that both of the hidden representations " + }, + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "\\pmb{h}^{(L)}, \\pmb{h}^{(l)} \\in \\mathbb{R}^{T \\times d_h}" + }, + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "type": "text", + "content": " have their length denoted as " + }, + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "type": "text", + "content": " and the hidden dimension indicated by " + }, + { + "bbox": [ 
+ 104, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "d_h" + }, + { + "bbox": [ + 104, + 699, + 504, + 723 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": "The hidden representations extracted from the encoder are then passed to the aggregator. The aggregator is designed to obtain a more compressed speech representation " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathbf{z}" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " that is aligned in length with the text transcription " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ". 
Consider that " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "\\mathbf{v} = [v_{1}, v_{2}, \\dots, v_{N}]" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "v_{i} \\in \\mathbb{V}" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": " is a text token sequence with length " + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 72, + 504, + 118 + ], + "type": "text", + "content": ", the input and output of the aggregator can be denoted as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 127, + 120, + 481, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 120, + 481, + 135 + ], + "spans": [ + { + "bbox": [ + 127, + 120, + 481, + 135 + ], + "type": "interline_equation", + "content": "\\boldsymbol {z} = \\operatorname {A g g r e g a t o r} (\\boldsymbol {v}, \\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)}), \\text {w h e r e} \\boldsymbol {z} \\in \\mathbb {R} ^ {N \\times d _ {z}}, \\boldsymbol {v} \\in \\mathbb {V} ^ {N}, \\text {a n d} \\boldsymbol {h} ^ {(L)}, \\boldsymbol {h} ^ {(l)} \\in \\mathbb {R} ^ {T \\times d _ {h}}.", + "image_path": "30ea210718cf295d11ed251b2e0c4a61d62f09e7adfa8b646b7f51c353bd4ce3.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 138, + 504, + 172 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 138, + 504, + 172 + ], + "spans": [ + { + "bbox": [ + 104, + 138, + 504, + 172 + ], + "type": "text", + "content": "To make the speech representation " + }, + { + "bbox": [ + 104, + 138, + 504, + 172 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 138, + 504, + 172 + ], + "type": "text", + "content": " text-aligned, we conduct a simple yet 
effective attention mechanism based on the three inputs. Consider that the original multi-head attention in [43] is denoted as MultiHead(" + }, + { + "bbox": [ + 104, + 138, + 504, + 172 + ], + "type": "inline_equation", + "content": "Q, K, V" + }, + { + "bbox": [ + 104, + 138, + 504, + 172 + ], + "type": "text", + "content": "), our first layer attention in the aggregator takes:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 110, + 176, + 498, + 190 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 176, + 498, + 190 + ], + "spans": [ + { + "bbox": [ + 110, + 176, + 498, + 190 + ], + "type": "interline_equation", + "content": "Q = \\text {t e x t} v, \\quad K = \\text {e n c o d e r} h ^ {(L)}, \\quad V = \\text {e n c o d e r} h ^ {(l)}.", + "image_path": "351f99d6838a20a4a0f4f2012a1716f5e56d7d8d0d6345456f74c0efd8ea496c.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 193, + 506, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 303 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 303 + ], + "type": "text", + "content": "By doing so, the length of our first multi-head attention output should follow the text transcription " + }, + { + "bbox": [ + 104, + 193, + 506, + 303 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 193, + 506, + 303 + ], + "type": "text", + "content": ". Note that the query of the following layers becomes the output from the previous layer. In addition, intuitions of using the encoder's last hidden representation as keys, and the shallow hidden representation as values can be described as follows: 1) In Transformer-based ASR models, the last hidden states often encode rich speech-text alignment cues; sometimes the cross-attention weight matrices can even be exploited as soft word-alignment maps [10, 35]. 
2) The shallow representation has been shown to support high-quality speech reconstruction even when the quantization is applied [7, 8]. Based on the above observations, we design our aggregator that can use the soft attention maps obtained from last encoder representations and the text transcriptions, to aggregate the shallow encoder representations that is beneficial for high-end speech reconstruction." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "text", + "content": "After getting the text-aligned representation, the quantizer " + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "inline_equation", + "content": "\\mathrm{Quantizer}(\\cdot)" + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "text", + "content": " is adopted to discretize the text-aligned representation. We use the residual vector quantization (RVQ) to allow coarse-to-fine quantization. 
Given the text-aligned speech representation " + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "text", + "content": " and the quantizer containing " + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 308, + 504, + 353 + ], + "type": "text", + "content": " residual vector quantization layers, we generate:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 161, + 356, + 505, + 389 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 161, + 356, + 505, + 389 + ], + "spans": [ + { + "bbox": [ + 161, + 356, + 505, + 389 + ], + "type": "interline_equation", + "content": "\\boldsymbol {q}, \\hat {\\boldsymbol {z}} = \\operatorname {Q u a n t i z e r} (\\boldsymbol {z}), \\quad \\boldsymbol {q} = \\left[ \\boldsymbol {q} ^ {(1)}, \\boldsymbol {q} ^ {(2)}, \\dots , \\boldsymbol {q} ^ {(R)} \\right], \\quad \\hat {\\boldsymbol {z}} = \\sum_ {r = 1} ^ {R} \\hat {\\boldsymbol {z}} ^ {(r)} \\tag {1}", + "image_path": "8d340474064b30ca59c86aab174a8fe3962c8cd0fc819ba0a06bd76498b8005d.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "spans": [ + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "content": "where each " + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\pmb{q}^{(r)} \\in \\mathbb{C}^N" + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "content": "-th layer code sequence with code set " + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + 
], + "type": "inline_equation", + "content": "\\mathbb{C}" + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "content": "; and the quantized embedding " + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{z}}" + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "content": " to be the summation over each layer of the codebook vectors. Note that both of the code sequence and the quantized speech embedding " + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "inline_equation", + "content": "\\hat{\\pmb{z}}" + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "content": " are text-aligned, with the lengths to be " + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 393, + 504, + 428 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 438, + 241, + 450 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 438, + 241, + 450 + ], + "spans": [ + { + "bbox": [ + 105, + 438, + 241, + 450 + ], + "type": "text", + "content": "2.1.2 TASTE Speech Decoder" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 457, + 505, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 457, + 505, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 457, + 505, + 502 + ], + "type": "text", + "content": "The speech decoder aims to perform speech reconstruction conditioned on the text token sequence and the text-aligned speech tokenization. As shown in Figure 2, the text and speech tokens are aligned in lengths and being fed into the speech decoder after weighted sum in an autoregressive manner. The speech decoder is composed of the two components: the unit decoder and the unit-to-speech vocoder." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "text", + "content": "The unit decoder UnitDecoder " + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "inline_equation", + "content": "(\\cdot)" + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "text", + "content": " is a Transformer-based decoder that takes the text token sequence " + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "text", + "content": " and the aligned speech embedding " + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "inline_equation", + "content": "\\hat{\\mathbf{z}}" + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "text", + "content": " as input and predicts the speech unit " + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 506, + 504, + 529 + ], + "type": "text", + "content": " for reconstruction:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 251, + 533, + 504, + 545 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 251, + 533, + 504, + 545 + ], + "spans": [ + { + "bbox": [ + 251, + 533, + 504, + 545 + ], + "type": "interline_equation", + "content": "\\boldsymbol {y} = \\operatorname {U n i t D e c o d e r} (\\hat {\\boldsymbol {z}}, \\boldsymbol {v}). 
\\tag {2}", + "image_path": "13b95a11153c7dc0abb384977a8b4f13f880ecdc763b376f421e3948b99c6a57.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "type": "text", + "content": "Note that the additional speaker embedding is also taken as input to facilitate global speaker voice control in our spoken language models [16]. After we generating the speech unit " + }, + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "type": "inline_equation", + "content": "\\mathbf{y}" + }, + { + "bbox": [ + 104, + 548, + 504, + 583 + ], + "type": "text", + "content": ", we use a unit-to-speech vocoder to further transform the unit into the reconstructed speech." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 593, + 219, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 593, + 219, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 593, + 219, + 605 + ], + "type": "text", + "content": "2.1.3 Training Objective" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": "Similar to other reconstruction-based speech tokens [24, 51], we derive TASTE by training it for speech resynthesis. 
To achieve this, we extract the speech unit " + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "inline_equation", + "content": "\\pmb{y}^{\\mathrm{target}}" + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": " with length " + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "inline_equation", + "content": "T'" + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": " from the original speech " + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "inline_equation", + "content": "u" + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": " as the target unit for our speech tokenizer and speech decoder. Given the text transcription " + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": ", the TASTE speech embedding " + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "inline_equation", + "content": "\\hat{z}" + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": ", and the unit from the original speech " + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "inline_equation", + "content": "\\pmb{y}^{\\mathrm{target}}" + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": " as the target, the speech reconstruction through the tokenizer and the unit decoder parametrized by " + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 612, + 505, + 678 + ], + "type": "text", + "content": " under the next prediction schema can be considered as minimizing the cross-entropy loss below:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 211, + 692, + 505, + 724 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 211, + 692, + 505, + 724 + 
], + "spans": [ + { + "bbox": [ + 211, + 692, + 505, + 724 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {c e}} (\\theta) = \\frac {1}{| T ^ {\\prime} |} \\sum_ {t = 1} ^ {T ^ {\\prime}} - \\log p _ {\\theta} \\left(y _ {t} ^ {\\text {t a r g e t}} \\mid \\hat {\\boldsymbol {z}}, \\boldsymbol {v}; \\boldsymbol {y} _ {< t} ^ {\\text {t a r g e t}}\\right) \\tag {3}", + "image_path": "f8b51d6f60775b33dc604782b7f7194279a4a2edc6e0140367c70afe7225af4c.jpg" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "text", + "content": "On the other hand, we employ the quantization loss as well to tokenize the continuous representation " + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "text", + "content": " extracted from the encoder-aggregator. 
Following prior works [5, 49], given that " + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "inline_equation", + "content": "z^{(r)}" + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "text", + "content": "-th residual and " + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "inline_equation", + "content": "\\hat{z}^{(r)}" + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "text", + "content": " indicates the " + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 72, + 504, + 109 + ], + "type": "text", + "content": "-th quantized residual, the commitment loss is defined as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 244, + 114, + 505, + 147 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 244, + 114, + 505, + 147 + ], + "spans": [ + { + "bbox": [ + 244, + 114, + 505, + 147 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {r v q}} (\\theta) = \\sum_ {r = 1} ^ {R} \\| z ^ {(r)} - \\hat {z} ^ {(r)} \\|. 
\\tag {4}", + "image_path": "3518462fe4b87eddccc916570b8c32f6eac7057ace9f6c5e868cb006d8238a4b.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 156, + 447, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 156, + 447, + 168 + ], + "spans": [ + { + "bbox": [ + 105, + 156, + 447, + 168 + ], + "type": "text", + "content": "By summation over both losses, we formulate the overall loss for training TASTE as:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 264, + 174, + 505, + 186 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 264, + 174, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 264, + 174, + 505, + 186 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\text {t a s t e}} = \\mathcal {L} _ {\\mathrm {c e}} + \\mathcal {L} _ {\\mathrm {r v q}}. \\tag {5}", + "image_path": "6f422c78972733a9c1c7937597b9b2c0305d8d702292093ed6726cb8c91118e6.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 191, + 504, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 191, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 104, + 191, + 504, + 214 + ], + "type": "text", + "content": "Note that to allow gradient to back-propagate from the unit decoder through the tokenizer, the straight-through estimation technique is applied towards the quantization process during training." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 227, + 299, + 239 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 227, + 299, + 239 + ], + "spans": [ + { + "bbox": [ + 105, + 227, + 299, + 239 + ], + "type": "text", + "content": "2.2 TASTE for Spoken Language Modeling" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "text", + "content": "Next, we describe how we conduct effective spoken language modeling with TASTE. Following previous work [11, 33], we perform pre-training on speech data. The text transcription of the speech data is also used for joint speech-text pre-training of our text-aligned spoken language model (TASLM). Since TASTE tokenization already aligns with the text token sequence, we can conduct a straightforward joint modeling, as illustrated in Figure 1. To demonstrate the robustness of TASTE, we perform two types of text-aligned spoken language modeling. First, we build " + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\mathrm{TASLM}_{\\mathrm{token}}" + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "text", + "content": " over our text-aligned speech token " + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "text", + "content": ", discussed in Section 2.2.1. 
Then, we show how we build " + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\mathrm{TASLM}_{\\mathrm{emb}}" + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "text", + "content": " with our text-aligned speech embedding " + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "inline_equation", + "content": "\\hat{z}" + }, + { + "bbox": [ + 104, + 247, + 506, + 335 + ], + "type": "text", + "content": ", detailed in Section 2.2.2." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 346, + 241, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 346, + 241, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 346, + 241, + 357 + ], + "type": "text", + "content": "2.2.1 Modeling TASTE Token" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "spans": [ + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": "As our speech tokens derived from the RVQ quantizer contain " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": " layers of codes, we employ " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": " linear heads for multi-head prediction in our " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\mathrm{TASLM}_{\\mathrm{token}}" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": ". 
Namely, the " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "\\mathrm{TASLM}_{\\mathrm{token}}" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": " simultaneously predicts the next text token and the corresponding " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": " layers of speech tokens in each step. The overall training objective follows the original next token prediction scheme, but with multiple predictions across modalities at each step. Specifically, given the text transcription " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "v" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": " layers of quantized RVQ codes " + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 104, + 364, + 504, + 431 + ], + "type": "text", + "content": ", the multi-head next-token prediction training objective can be formulated as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 145, + 437, + 505, + 469 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 437, + 505, + 469 + ], + "spans": [ + { + "bbox": [ + 145, + 437, + 505, + 469 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {t o k e n}} (\\phi) = \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} \\left(- \\log p _ {\\phi} ^ {\\text {t e x t}} \\left(v _ {i} \\mid \\boldsymbol {v} _ {< i}, \\boldsymbol {q} _ {< i}\\right) + \\sum_ {r = 1} ^ {R} - \\log p _ {\\phi} ^ {(\\mathrm {r})} \\left(q _ {i} ^ {(\\mathrm {r})} \\mid 
\\boldsymbol {v} _ {< i}, \\boldsymbol {q} _ {< i}\\right)\\right), \\tag {6}", + "image_path": "f32cb7408ec6859e010ad192990d938cfe29c53a64332a99b999f3b23eef07cd.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": "with " + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "inline_equation", + "content": "\\phi" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": " represents the parameter of the TASLM token, and " + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "inline_equation", + "content": "p^{(r)}" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": " is the " + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": "-th probability prediction for the " + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 104, + 475, + 504, + 510 + ], + "type": "text", + "content": "-th RVQ code. As for inference, we directly sample the codes and the text simultaneously, and transform the codes into the corresponding embedding for the speech decoder to generate speech." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 521, + 265, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 521, + 265, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 521, + 265, + 533 + ], + "type": "text", + "content": "2.2.2 Modeling TASTE Embedding" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": "Besides the token code sets, recent progress on latent modeling [17, 28] motivates us to conduct experiments on modeling our text-aligned speech embedding. Referencing MELLE [28], we employ a linear layer that predicts the mean vector " + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\mu_{i}" + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": " and a log-magnitude variance vector " + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\log \\sigma_i^2" + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": " indicates the " + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": "-th frame of the sequence. 
And the final predicted latent of frame " + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": " is denoted as " + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "inline_equation", + "content": "e_i = \\mu_i + \\sigma_i\\odot \\epsilon" + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "inline_equation", + "content": "\\epsilon \\sim \\mathcal{N}(0,I)" + }, + { + "bbox": [ + 104, + 540, + 504, + 606 + ], + "type": "text", + "content": ". Following MELLE, the straight-through estimator is applied to allow gradients to back-propagate properly during training." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 610, + 506, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 610, + 506, + 633 + ], + "spans": [ + { + "bbox": [ + 104, + 610, + 506, + 633 + ], + "type": "text", + "content": "To facilitate latent prediction, we apply the regularization loss and the Kullback-Leibler (KL) divergence loss driving training, which is described as follows:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 129, + 639, + 505, + 673 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 639, + 505, + 673 + ], + "spans": [ + { + "bbox": [ + 129, + 639, + 505, + 673 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {r e g}} (\\psi) = \\left\\| \\boldsymbol {e} _ {\\psi} - \\hat {\\boldsymbol {z}} \\right\\| _ {2} ^ {2}, \\quad \\mathcal {L} _ {\\mathrm {K L}} = \\frac {1}{2} \\sum_ {i = 1} ^ {N} \\sum_ {j = 1} ^ {d _ {z}} \\left(\\sigma_ {i} [ j ] + \\left(\\mu_ {i} [ j ] - \\hat {z} _ {i} [ j ]\\right) ^ {2}\\right) - 1 - \\log \\sigma_ {i} ^ {2} [ j ]), \\tag {7}", + "image_path": 
"bbdeb50fc600f45f77d15dcc3268b7b60b13f61b3c78d617a5b712289fd9f4c2.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\psi" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": " indicates the parameter of TASLMemb, and " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "d_{z}" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": " is the dimension of our text-aligned embedding " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{z}" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": ". The regularization loss " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{reg}}" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": " is adopted to predict close latent towards the target embedding " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{z}" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": ". The KL divergence loss calculates the KL divergence between the predicted latent distribution and the target distribution. Following MELLE, we select the target distribution to be " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(\\hat{z}_i,I)" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": "This allows simplification of " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "\\mathcal{L}_{\\mathrm{KL}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": ", which can then be approximated with the predicted vectors " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "\\mu_i, \\sigma_i" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": ", and the target embedding " + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "inline_equation", + "content": "\\hat{z}_i" + }, + { + "bbox": [ + 104, + 72, + 504, + 97 + ], + "type": "text", + "content": ". 
Finally, the overall loss along with the text loss is described as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 162, + 102, + 505, + 135 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 102, + 505, + 135 + ], + "spans": [ + { + "bbox": [ + 162, + 102, + 505, + 135 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {e m b}} (\\psi) = \\lambda_ {\\mathrm {r e g}} \\cdot \\mathcal {L} _ {\\mathrm {r e g}} + \\lambda_ {\\mathrm {K L}} \\cdot \\mathcal {L} _ {\\mathrm {K L}} + \\frac {1}{| N |} \\sum_ {i = 1} ^ {N} - \\log p _ {\\psi} ^ {\\text {t e x t}} \\left(v _ {i} \\mid \\boldsymbol {v} _ {< i}, \\hat {\\boldsymbol {z}} _ {< i}\\right), \\tag {8}", + "image_path": "651f9ed6ac53046731d857cdd912265b0ec0c53f90e6424c0a888885260e44bf.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 140, + 418, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 140, + 418, + 154 + ], + "spans": [ + { + "bbox": [ + 104, + 140, + 418, + 154 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 140, + 418, + 154 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{reg}}" + }, + { + "bbox": [ + 104, + 140, + 418, + 154 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 140, + 418, + 154 + ], + "type": "inline_equation", + "content": "\\lambda_{\\mathrm{KL}}" + }, + { + "bbox": [ + 104, + 140, + 418, + 154 + ], + "type": "text", + "content": " to be the weighted coefficients of the two losses, respectively." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 167, + 220, + 182 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 167, + 220, + 182 + ], + "spans": [ + { + "bbox": [ + 105, + 167, + 220, + 182 + ], + "type": "text", + "content": "3 Experiment Setup" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 192, + 222, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 192, + 222, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 192, + 222, + 205 + ], + "type": "text", + "content": "3.1 Model Configuration" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "spans": [ + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "text", + "content": "For our TASTE speech tokenizer, we initialize our encoder from Whisper [35]. Specifically, we use whisper-large-v3 for our initialization. By doing so, we can reduce computational cost between obtaining the ASR transcription and extracting the TASTE tokenization with the TASTE encoder frozen during training. On the other hand, we use the S3 token from CosyVoice [7] as the target unit for speech reconstruction. Since their speech tokenization facilitates additional speaker embedding, we follow the same procedure to obtain one. Adding speaker embedding allows global speaker voice control, which is a reasonable and useful scenario for spoken language models. The unit-to-speech vocoder is comprised of a flow model [23, 27] and a HifiGAN. We use the published pre-trained ones from [7], and they are not involved in our training. For the quantizer, we set the RVQ layer " + }, + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "inline_equation", + "content": "R = 4" + }, + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "text", + "content": ", the codebook size 512, and the codebook dimension to be 256. 
For the spoken language modeling, we follow previous work [6, 11, 22, 33] and initialize our spoken language model from a text LLM. However, this introduces the vocabulary mismatch problem between the ASR and LLM. We resolve this issue by using word-level TASTE tokenization and embedding, which is detailed in Appendix A.2. Moreover, we conduct Low-Rank Adaptation (LoRA) for parameter-efficient fine-tuning of our TASLMs. We set the corresponding hyperparameters rank " + }, + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "inline_equation", + "content": "r = 64" + }, + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "inline_equation", + "content": "\\alpha = 128" + }, + { + "bbox": [ + 104, + 212, + 506, + 377 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 389, + 164, + 400 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 389, + 164, + 400 + ], + "spans": [ + { + "bbox": [ + 105, + 389, + 164, + 400 + ], + "type": "text", + "content": "3.2 Dataset" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 410, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 410, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 410, + 506, + 477 + ], + "type": "text", + "content": "We use two datasets—Emilia and LibriTTS—as our training datasets. Emilia [12] is an in-the-wild dataset where the speech is web-scaled and the transcriptions are pseudo-labeled. We use only the English subset of this multi-lingual corpus, which is about 40,000 hours. LibriTTS [50] is a reading-style corpus based on LibriSpeech [34]. We use all the training splits in LibriTTS for training, which is approximately 600 hours of speech. In addition, the test-clean split in LibriSpeech is used for evaluation purposes for our TASTE tokenizer and TASLMs." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 491, + 160, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 491, + 160, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 491, + 160, + 504 + ], + "type": "text", + "content": "4 Result" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 516, + 506, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 506, + 551 + ], + "type": "text", + "content": "We separate the evaluation into two phases: Section 4.1 shows the results regarding our TASTE tokenization; while Section 4.2 evaluates our TASLM across multiple aspects, including acoustic, semantic, and continuation. For clarity, the metrics are introduced within each section." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 563, + 265, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 265, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 265, + 574 + ], + "type": "text", + "content": "4.1 Results of TASTE Tokenization" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 582, + 285, + 595 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 582, + 285, + 595 + ], + "spans": [ + { + "bbox": [ + 105, + 582, + 285, + 595 + ], + "type": "text", + "content": "4.1.1 Speech Reconstruction Evaluation" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 602, + 504, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 602, + 504, + 635 + ], + "spans": [ + { + "bbox": [ + 104, + 602, + 504, + 635 + ], + "type": "text", + "content": "We first represent the speech reconstruction evaluation results. 
For comprehensive evaluation, we use different metrics, including the reference-free metrics for quality assessment, and the reference-based metrics for evaluating the similarity between the reconstructed and the original speech." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 647, + 505, + 704 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 647, + 505, + 704 + ], + "spans": [ + { + "bbox": [ + 104, + 647, + 505, + 704 + ], + "type": "text", + "content": "Quality Assessment We use ASR-WER, UTMOS [38], and DNS-MOS [37] as our metrics for evaluating the speech quality. For ASR-WER, we use HuBERT-Large [14] as the ASR model to transcribe the speech, and then calculate the word-error rate (WER) on the transcription. " + }, + { + "bbox": [ + 104, + 647, + 505, + 704 + ], + "type": "inline_equation", + "content": "{}^{1}" + }, + { + "bbox": [ + 104, + 647, + 505, + 704 + ], + "type": "text", + "content": " UTMOS and DNS-MOS are both neural-based MOS predictors. While both evaluate the speech quality, the design purpose of DNS-MOS makes it more suitable for evaluation regarding the noise levels." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 711, + 374, + 723 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 711, + 374, + 723 + ], + "spans": [ + { + "bbox": [ + 118, + 711, + 374, + 723 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 110, + 504, + 252 + ], + "blocks": [ + { + "bbox": [ + 105, + 77, + 504, + 110 + ], + "lines": [ + { + "bbox": [ + 105, + 77, + 504, + 110 + ], + "spans": [ + { + "bbox": [ + 105, + 77, + 504, + 110 + ], + "type": "text", + "content": "Table 1: The speech tokenization evaluation results on the test-clean split of LibriTTS. The evaluation is separated into the QUALITY and the SIMILARITY assessments, as introduced in Section 4.1.1. We use gray text to indicate the worst-performing methods in each metric." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 110, + 504, + 252 + ], + "lines": [ + { + "bbox": [ + 106, + 110, + 504, + 252 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 504, + 252 + ], + "type": "table", + "html": "
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%4.093.84---76.6
Encodec [5]15005.1%1.583.263.460.940.63-
30002.6%2.353.483.810.960.7825.6
SpeechTokenizer [51]5005.2%1.272.992.800.940.35-
20003.0%3.563.603.650.970.8053.9
40002.5%3.903.764.030.980.92-
Mimi [6]10003.1%3.603.603.620.960.8267.6
S3 token (topline) [7]6003.0%4.183.903.300.960.8270.2
Text-only (baseline)~505.9%4.314.112.440.570.7842.6
TASTE (ours)~1504.4%4.294.103.050.910.8068.3
", + "image_path": "cd044b89abcba5b2d464085ca59cc04326658efa54fca2b094bce5b36bb2d8b4.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 274, + 506, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 274, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 274, + 506, + 361 + ], + "type": "text", + "content": "Similarity Assessment For similarity, we measure ViSQOL, duration consistency (Drtn. Con.), speaker similarity (Spkr. Sim.) and the MUSHRA test as human evaluation. We use ViSQOL [4] is a production-ready tool that predicts speech quality via spectro-temporal image similarity comparisons. For the duration consistency, we first get the word-level alignment of the transcriptions of the original and the reconstructed speech using Montreal Forced Aligner [26]; then we calculate if the duration between each of the same words is matched under a preset tolerance window, which is set to 50 milliseconds. For MUSHRA human listening test, we reference the original protocol [40] to instruct evaluators to rate the similarity and quality on a scale of 1 to 100 with reference given." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 373, + 506, + 548 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 373, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 373, + 506, + 548 + ], + "type": "text", + "content": "Speech Reconstruction Results The evaluation results of our speech reconstruction on LibriSpeech are shown in Table 1. We highlight that our TASTE speech tokenization has the lowest bitrate among all the other speech tokenization methods. Note that since the speech tokenization is dynamic in frequency, we estimate our bitrate based on calculating the overall token count and the duration over the testing set. 
Despite the low bitrate, we generally attain much better performance comparing with the worst performing methods (gray text in the table) across each metric. Moreover, on the quality assessment, our MOS prediction scores are the second highest and even surpasses the ground truth, showcasing the reconstructed speech is of high quality. Next, we focus on the results of the similarity assessment. As for the duration consistency, we score the second-worst performance comparing with the other methods. We attribute this to the fact that our tokenization compress the sequence in a very dynamic way. Despite that, we still outperform the text-only method with a large margin, perform close towards other speech tokenization methods which all have a fixed down-sampling rate. Lastly, our method attains the second-highest MUSHRA score (excluding the ground-truth anchor). This highlights TASTE's effectiveness: even without reproducing every microscopic detail, it still yields perceptually high-quality speech in human listening tests. Overall, TASTE carries rich paralinguistic information, facilitating high-end speech reconstruction under an extremely low bitrate." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 560, + 309, + 572 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 560, + 309, + 572 + ], + "spans": [ + { + "bbox": [ + 104, + 560, + 309, + 572 + ], + "type": "text", + "content": "4.1.2 TASTE for Text-Aligned Speech Editing" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "content": "After comprehensive speech-reconstruction experiments, we show that TASTE can also perform text-aligned speech editing. Suppose we have two utterances with the same transcript but different paralinguistic characteristics. 
By exchanging their TASTE token sequences word by word, we ask whether the associated paralinguistic traits are transferred as well. To make the effect easy to see, we choose utterances that differ mainly in speaking rate and focus on duration changes. The overall text-aligned editing procedure is describe as follows: 1) Extract the TASTE tokens " + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{z}^{\\mathrm{orig}}" + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "content": " for each source utterance 2) Swap the tokens at the desired text positions, resulting in edited TASTE tokens " + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{z}^{\\mathrm{edit}}" + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "content": ". 3) Decode the edited token sequence " + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{z}^{\\mathrm{edit}}" + }, + { + "bbox": [ + 104, + 579, + 506, + 723 + ], + "type": "text", + "content": " back to speech. In Figure 3, we present the alignments obtained from MFA [26] of the original speech and the speech after editing, and compare them horizontally. As shown in the figure, words whose tokens were swapped exhibit clear duration shifts, while the untouched words keep their original timing—evidence that TASTE enables precise, text-aligned manipulation. Additional examples that target other paralinguistic dimensions are provided on our demo page." 
+ } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 70, + 261, + 186 + ], + "blocks": [ + { + "bbox": [ + 108, + 70, + 261, + 186 + ], + "lines": [ + { + "bbox": [ + 108, + 70, + 261, + 186 + ], + "spans": [ + { + "bbox": [ + 108, + 70, + 261, + 186 + ], + "type": "image", + "image_path": "2f0bfffa21da24cd7b583028f31b507e7681918acc4a32cdc33e9ee74b4bd996.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "lines": [ + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 193, + 506, + 239 + ], + "type": "text", + "content": "Figure 3: An illustration of TASTE for text-aligned speech editing. On the left shows the process of our text-aligned speech editing. We first extract the TASTE tokens; swap the tokens partially; and then decode the edited TASTE tokens into edited speech. On the right shows an example visualization. Only the durations of the words with exchanged TASTE tokens show significant difference." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 264, + 70, + 504, + 187 + ], + "blocks": [ + { + "bbox": [ + 264, + 70, + 504, + 187 + ], + "lines": [ + { + "bbox": [ + 264, + 70, + 504, + 187 + ], + "spans": [ + { + "bbox": [ + 264, + 70, + 504, + 187 + ], + "type": "image", + "image_path": "81e12a75d5eaba0cef27031e19cf6686815234b8941b4228e622e8d418018a9b.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 258, + 357, + 272 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 258, + 357, + 272 + ], + "spans": [ + { + "bbox": [ + 105, + 258, + 357, + 272 + ], + "type": "text", + "content": "4.2 Evaluating Text-Aligned Spoken Language Modeling" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 279, + 506, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 279, + 506, + 324 + ], + "spans": [ + { + "bbox": [ + 104, + 279, + 506, + 324 + ], + "type": "text", + "content": "To provide a comprehensive evaluation of our text-aligned spoken language modeling (TASLM), we first compare our pre-trained SLM with other methods through speech continuation and likelihood-based benchmarks in Section 4.2.1. Then, to further investigate the understanding capabilities, we conduct evaluation with spoken question answering in Section 4.2.2." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 335, + 323, + 347 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 323, + 347 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 323, + 347 + ], + "type": "text", + "content": "4.2.1 Comparing TASLM with Pretrained SLMs" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "spans": [ + { + "bbox": [ + 104, + 354, + 506, + 443 + ], + "type": "text", + "content": "Speech Continuation Evaluation A typical way to evaluate the pre-trained SLM is by performing conditional generation. Following previous work [11, 22], we use the 3-second prompt speech from the LibriSpeech test-clean. To evaluate the quality of the continuations generated by the SLMs, we employ GPT-4o to assign MOS scores regarding the transcribed speech continuation using ASR, focusing on the semantic coherence of the continuation. In addition, we compute UTMOS as for evaluating the speech quality and naturalness. Last but not least, we conduct human listening test, in which each evaluator is asked to give a MOS score regarding the overall performance of the generated speech continuation. The details of the instructions for GPT-4o and human are in Appendix." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 454, + 506, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 454, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 454, + 506, + 489 + ], + "type": "text", + "content": "Likelihood-Based Evaluation Following previous work [11, 22, 33], we also evaluate our SLMs through likelihood-based benchmarks, where the accuracy score is based on whether the model chooses the correct continuation from the two given speech utterances based on its output likelihoods." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 106, + 568, + 504, + 709 + ], + "blocks": [ + { + "bbox": [ + 104, + 513, + 505, + 567 + ], + "lines": [ + { + "bbox": [ + 104, + 513, + 505, + 567 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 505, + 567 + ], + "type": "text", + "content": "Table 2: Pretrained SLM speech continuation and likelihood-based next-speech selection results. The superscripts at the bottom of the table indicate the base models used by each SLM, indicated by superscripts. Cascade models refer to the pipeline with ASR (whisper-large-v3), text continuation by LMs, and TTS (CosyVoice). This comparison evaluates SLMs and cascade models in continuation evaluation. As shown in the table, TASLM tends to preserve the semantic capabilities of LMs." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 568, + 504, + 709 + ], + "lines": [ + { + "bbox": [ + 106, + 568, + 504, + 709 + ], + "spans": [ + { + "bbox": [ + 106, + 568, + 504, + 709 + ], + "type": "table", + "html": "
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.154.254.00---
Cascade (LLaMA2-7Bβ)-3.434.253.98---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.483.251.9562.561.562.0
TWIST 7B [11]7B / 7Bγ1.443.272.0463.464.764.1
Spirit LM [33]7B / 7Bβ2.793.412.3859.172.065.6
Spirit LM Expr. [33]7B / 7Bβ1.903.402.4169.066.267.6
Baseline (S3 token)45M / 1.3Bα1.374.042.8450.258.754.5
TASLM 1B (token)45M / 1.3Bα3.084.073.9360.876.568.7
TASLM 1B (embed.)45M / 1.3Bα3.164.224.1657.776.767.2
", + "image_path": "e45e9d10721807dc88752e032a3bc967c5e37ba7fac2be48a2c5ec4780a4ab95.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "lines": [ + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "spans": [ + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "text", + "content": "Base models: " + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "text", + "content": " LLaMA3.2-1B, " + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "text", + "content": " LLaMA2-7B, " + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "text", + "content": " LLaMA-7B, " + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 109, + 710, + 312, + 718 + ], + "type": "text", + "content": " OPT-1.3B" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 128 + ], + "type": "text", + "content": "We adopt two established benchmarks SALMON [25] and spoken StoryCloze [11, 29], 
which covers the acoustic aspect and the semantic aspect, respectively. Since both benchmarks contain multiple tasks, we report the average accuracy across these tasks within each benchmark for simplicity. The detailed results are in Appendix A.5.1 for the interested readers. We also report the mean of the SALMON and StoryCloze as an overall assessment for both aspects." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 139, + 506, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 139, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 139, + 506, + 271 + ], + "type": "text", + "content": "Results The results of TASLM comparing to other pre-trained SLM are in Table 2. First, we highlight that our TASLMs have achieved significantly higher scores on speech continuation across human and machine evaluations; and good performance on the likelihood-based benchmarks. Note that our base language model contains only 1.3 billion parameters, showing the effectiveness of using TASTE for joint modeling. Compared to the cascade method that has the same base model (first row), our " + }, + { + "bbox": [ + 104, + 139, + 506, + 271 + ], + "type": "inline_equation", + "content": "\\mathrm{TASLM}_{\\mathrm{emb}}" + }, + { + "bbox": [ + 104, + 139, + 506, + 271 + ], + "type": "text", + "content": " scores comparable on GPT-4o but better on human MOS. This indicates that our generated speech is more natural than the cascade one that utilizes TTS for synthesis. Next, our TASLM is the only SLM that not only maintains but even surpasses the performance of its corresponding text-base model. Moreover, we demonstrate that directly using the S3 token for joint modeling following [46] does not yield performance comparable in any aspect. The result further strengthen the intuition behind TASTE (mitigating the length-mismatch during tokenization stage facilitates effective joint spoken language modeling)." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 282, + 312, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 282, + 312, + 295 + ], + "spans": [ + { + "bbox": [ + 105, + 282, + 312, + 295 + ], + "type": "text", + "content": "4.2.2 TASLM for Spoken Question Answering" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 300, + 286, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 300, + 286, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 300, + 286, + 475 + ], + "type": "text", + "content": "Following [6], we conduct evaluation on spoken question answering to investigate the understanding ability of our TASLM. For this experiment, we use the " + }, + { + "bbox": [ + 104, + 300, + 286, + 475 + ], + "type": "inline_equation", + "content": "\\mathrm{TASLM}_{\\mathrm{emb}}" + }, + { + "bbox": [ + 104, + 300, + 286, + 475 + ], + "type": "text", + "content": " for simplicity. We compare our pre-trained only SLM with other instruction-finetuned joint SLMs such as Mini-Omni [46], Moshi [6], and Llama-Omni [9]. We use two spoken question answering benchmarks, Web Questions [2] and LLaMA-Questions [30], following [30]. We report the accuracy of answer containment. For fairness, we report not only the performance of the speech-text joint SLMs, but also the base text LLM they used if applicable. Our results indicate that our TASLM is the only method that does not" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 475, + 504, + 498 + ], + "type": "text", + "content": "degrade the corresponding text base LLM. We attribute the phenomenon to the effectiveness of our TASTE tokenization for joint speech-text modeling." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "table", + "bbox": [ + 293, + 353, + 504, + 458 + ], + "blocks": [ + { + "bbox": [ + 291, + 314, + 506, + 347 + ], + "lines": [ + { + "bbox": [ + 291, + 314, + 506, + 347 + ], + "spans": [ + { + "bbox": [ + 291, + 314, + 506, + 347 + ], + "type": "text", + "content": "Table 3: Evaluation of spoken question answering. Performance across modalities is compared row-wise, where T denotes text and A denotes audio." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 293, + 353, + 504, + 458 + ], + "lines": [ + { + "bbox": [ + 293, + 353, + 504, + 458 + ], + "spans": [ + { + "bbox": [ + 293, + 353, + 504, + 458 + ], + "type": "table", + "html": "
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.339.0
Mini-Omni 0.5B [46]T+A4.511.6
Helium 7B (text)T32.375.0
Moshi 7B [6]T+A26.662.3
LLaMA3.1-8B-InstructT60.471.7
Llama-Omni-8B [9]T+A35.567.3
LLaMA3.2-1B†T24.051.0
TASLM 1B (embed.)†T+A27.157.6
", + "image_path": "3427cad281a5439a9f7df10b83a4b3dac65abff326e4200bd70cb65812769ba6.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 298, + 458, + 471, + 468 + ], + "lines": [ + { + "bbox": [ + 298, + 458, + 471, + 468 + ], + "spans": [ + { + "bbox": [ + 298, + 458, + 471, + 468 + ], + "type": "text", + "content": "We apply few-shot learning to facilitate question answering." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 513, + 185, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 513, + 185, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 513, + 185, + 525 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 538, + 506, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 538, + 506, + 638 + ], + "spans": [ + { + "bbox": [ + 104, + 538, + 506, + 638 + ], + "type": "text", + "content": "In this work, we propose Text-Aligned Speech Tokenization and Embedding (TASTE), to facilitate joint speech-text spoken language modeling. By aggregating proper encoder representation through the specialized cross-attention mechanism and taking the ASR model as initialization, we make the speech tokenization text-aligned in an end-to-end manner with no explicit word alignment required. We conduct extensive evaluation on our TASTE tokenizer. Our results show that TASTE allows high quality speech reconstruction at an extremely low bitrate. With our text-aligned speech tokenization and embedding, joint speech-text modeling becomes straightforward and effective. Our experimental results indicate that TASTE enables turning a text LLM into a spoken one with the simple parameter-efficient finetuning technique applied." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 648, + 507, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 648, + 507, + 717 + ], + "spans": [ + { + "bbox": [ + 104, + 648, + 507, + 717 + ], + "type": "text", + "content": "**Limitation** Several limitations of our current work point to promising avenues for future research. First, neither our TASTE tokenization nor the text-aligned SLM has been optimized for time efficiency; developing a low-latency, streaming variant remains future work. Second, we have evaluated TASTE only on English data—its portability to other languages deserves thorough investigation. Third, although our pretrained SLM generates high-quality continuations, it does not yet support robust turn-taking or instruction-following behavior, both of which are essential for truly interactive systems." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 84 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 89, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 111, + 89, + 505, + 123 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 89, + 505, + 123 + ], + "spans": [ + { + "bbox": [ + 111, + 89, + 505, + 123 + ], + "type": "text", + "content": "[1] Alexei Baevski, Yuhao Zhou, Abdelrahman Mohamed, and Michael Auli. 
wav2vec 2.0: A framework for self-supervised learning of speech representations. In Advances in Neural Information Processing Systems, 2020." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 129, + 505, + 163 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 129, + 505, + 163 + ], + "spans": [ + { + "bbox": [ + 111, + 129, + 505, + 163 + ], + "type": "text", + "content": "[2] Jonathan Berant, Andrew Chou, Roy Frostig, and Percy Liang. Semantic parsing on freebase from question-answer pairs. In Proceedings of the 2013 conference on empirical methods in natural language processing, 2013." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 169, + 506, + 203 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 169, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 111, + 169, + 506, + 203 + ], + "type": "text", + "content": "[3] Li-Wei Chen and Alexander Rudnicky. Fine-grained style control in transformer-based text-to-speech synthesis. In ICASSP 2022-2022 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 209, + 504, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 209, + 504, + 243 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 504, + 243 + ], + "type": "text", + "content": "[4] Michael Chinen, Felicia SC Lim, Jan Skoglund, Nikita Gureev, Feargus O'Gorman, and Andrew Hines. Visqol v3: An open source production ready objective speech and audio metric. In 2020 twelfth international conference on quality of multimedia experience (QoMEX), 2020." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 249, + 504, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 249, + 504, + 272 + ], + "spans": [ + { + "bbox": [ + 111, + 249, + 504, + 272 + ], + "type": "text", + "content": "[5] Alexandre Défossez, Jade Copet, Gabriel Synnaeve, and Yossi Adi. High fidelity neural audio compression. Transactions on Machine Learning Research, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 278, + 505, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 278, + 505, + 312 + ], + "spans": [ + { + "bbox": [ + 111, + 278, + 505, + 312 + ], + "type": "text", + "content": "[6] Alexandre Défossez, Laurent Mazaré, Manu Orsini, Amélie Royer, Patrick Pérez, Hervé Jégou, Edouard Grave, and Neil Zeghidour. Moshi: a speech-text foundation model for real-time dialogue. arXiv preprint arXiv:2410.00037, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 318, + 504, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 318, + 504, + 352 + ], + "spans": [ + { + "bbox": [ + 111, + 318, + 504, + 352 + ], + "type": "text", + "content": "[7] Zhihao Du, Qian Chen, Shiliang Zhang, Kai Hu, Heng Lu, Yexin Yang, Hangrui Hu, Siqi Zheng, Yue Gu, Ziyang Ma, et al. Cosyvoice: A scalable multilingual zero-shot text-to-speech synthesizer based on supervised semantic tokens. arXiv preprint arXiv:2407.05407, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 358, + 504, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 358, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 111, + 358, + 504, + 392 + ], + "type": "text", + "content": "[8] Zhihao Du, Yuxuan Wang, Qian Chen, Xian Shi, Xiang Lv, Tianyu Zhao, Zhifu Gao, Yexin Yang, Changfeng Gao, Hui Wang, et al. Cosyvoice 2: Scalable streaming speech synthesis with large language models. CoRR, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 398, + 506, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 398, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 111, + 398, + 506, + 422 + ], + "type": "text", + "content": "[9] Qingkai Fang, Shoutao Guo, Yan Zhou, Zhengrui Ma, Shaolei Zhang, and Yang Feng. LlamaOmni: Seamless speech interaction with large language models. CoRR, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 426, + 504, + 450 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 426, + 504, + 450 + ], + "spans": [ + { + "bbox": [ + 107, + 426, + 504, + 450 + ], + "type": "text", + "content": "[10] Sanchit Gandhi, Patrick von Platen, and Alexander M Rush. Distil-whisper: Robust knowledge distillation via large-scale pseudo labelling. arXiv preprint arXiv:2311.00430, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 456, + 504, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 456, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 106, + 456, + 504, + 491 + ], + "type": "text", + "content": "[11] Michael Hassid, Tal Remez, Tu Anh Nguyen, Itai Gat, Alexis Conneau, Felix Kreuk, Jade Copet, Alexandre Defossez, Gabriel Synnaeve, Emmanuel Dupoux, et al. Textually pretrained speech language models. Advances in Neural Information Processing Systems, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 495, + 506, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 495, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 107, + 495, + 506, + 540 + ], + "type": "text", + "content": "[12] Haorui He, Zengqiang Shang, Chaoren Wang, Xuyuan Li, Yicheng Gu, Hua Hua, Liwei Liu, Chen Yang, Jiaqi Li, Peiyang Shi, et al. Emilia: An extensive, multilingual, and diverse speech dataset for large-scale speech generation. In 2024 IEEE Spoken Language Technology Workshop (SLT), 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 546, + 504, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 546, + 504, + 581 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 504, + 581 + ], + "type": "text", + "content": "[13] Pin-Lun Hsu, Yun Dai, Vignesh Kothapalli, Qingquan Song, Shao Tang, Siyu Zhu, Steven Shimizu, Shivam Sahni, Haowen Ning, and Yanning Chen. Liger kernel: Efficient triton kernels for llm training. arXiv preprint arXiv:2410.10989, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 586, + 506, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 586, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 107, + 586, + 506, + 632 + ], + "type": "text", + "content": "[14] Wei-Ning Hsu, Benjamin Bolte, Yao-Hung Hubert Tsai, Kushal Lakhotia, Ruslan Salakhutdinov, and Abdelrahman Mohamed. Hubert: Self-supervised speech representation learning by masked prediction of hidden units. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 638, + 506, + 671 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 638, + 506, + 671 + ], + "spans": [ + { + "bbox": [ + 107, + 638, + 506, + 671 + ], + "type": "text", + "content": "[15] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. Lora: Low-rank adaptation of large language models. arXiv preprint arXiv:2106.09685, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 677, + 506, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 506, + 721 + ], + "type": "text", + "content": "[16] Zeqian Ju, Yuancheng Wang, Kai Shen, Xu Tan, Detai Xin, Dongchao Yang, Yanqing Liu, Yichong Leng, Kaitao Song, Siliang Tang, et al. 
Naturalspeech 3: Zero-shot speech synthesis with factorized codec and diffusion models. International Conference on Machine Learning, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 96 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 96 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 96 + ], + "type": "text", + "content": "[17] Jaehyeon Kim, Keon Lee, Seungjun Chung, and Jaewoong Cho. Clam-tts: Improving neural codec language model for zero-shot text-to-speech. *ICLR*, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 103, + 505, + 138 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 103, + 505, + 138 + ], + "spans": [ + { + "bbox": [ + 106, + 103, + 505, + 138 + ], + "type": "text", + "content": "[18] Taku Kudo. Subword regularization: Improving neural network translation models with multiple subword candidates. Proceedings of the 56th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2018." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 146, + 504, + 181 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 146, + 504, + 181 + ], + "spans": [ + { + "bbox": [ + 107, + 146, + 504, + 181 + ], + "type": "text", + "content": "[19] Taku Kudo and John Richardson. 
Sentencepiece: A simple and language independent subword tokenizer and tokenizer for neural text processing. Proceedings of the 2018 Conference on Empirical Methods in Natural Language Processing: System Demonstrations, 2018." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 188, + 506, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 188, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 107, + 188, + 506, + 222 + ], + "type": "text", + "content": "[20] Rithesh Kumar, Prem Seetharaman, Alejandro Luebs, Ishaan Kumar, and Kundan Kumar. High-fidelity audio compression with improved rvqgan. Advances in Neural Information Processing Systems, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 231, + 506, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 231, + 506, + 275 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 506, + 275 + ], + "type": "text", + "content": "[21] Kushal Lakhotia, Eugene Kharitonov, Wei-Ning Hsu, Yossi Adi, Adam Polyak, Benjamin Bolte, Tu-Anh Nguyen, Jade Copet, Alexei Baevski, Abdelrahman Mohamed, et al. On generative spoken language modeling from raw audio. Transactions of the Association for Computational Linguistics, 2021." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 284, + 506, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 284, + 506, + 319 + ], + "spans": [ + { + "bbox": [ + 107, + 284, + 506, + 319 + ], + "type": "text", + "content": "[22] Guan-Ting Lin, Prashanth Gurunath Shivakumar, Aditya Gourav, Yile Gu, Ankur Gandhe, Hung-yi Lee, and Ivan Bulyko. Align-slm: Textless spoken language models with reinforcement learning from ai feedback. arXiv preprint arXiv:2411.01834, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 327, + 506, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 327, + 506, + 360 + ], + "spans": [ + { + "bbox": [ + 107, + 327, + 506, + 360 + ], + "type": "text", + "content": "[23] Yaron Lipman, Ricky TQ Chen, Heli Ben-Hamu, Maximilian Nickel, and Matt Le. Flow matching for generative modeling. The Eleventh International Conference on Learning Representations, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 369, + 506, + 414 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 369, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 107, + 369, + 506, + 414 + ], + "type": "text", + "content": "[24] Alexander H Liu, Sang-gil Lee, Chao-Han Huck Yang, Yuan Gong, Yu-Chiang Frank Wang, James R Glass, Rafael Valle, and Bryan Catanzaro. Uniwav: Towards unified pre-training for speech representation learning and generation. The Thirteenth International Conference on Learning Representations, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 422, + 504, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 422, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 107, + 422, + 504, + 445 + ], + "type": "text", + "content": "[25] Gallil Maimon, Amit Roth, and Yossi Adi. Salmon: A suite for acoustic language model evaluation. arXiv preprint arXiv:2409.07437, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 454, + 506, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 454, + 506, + 487 + ], + "spans": [ + { + "bbox": [ + 107, + 454, + 506, + 487 + ], + "type": "text", + "content": "[26] Michael McAuliffe, Michaela Socolof, Sarah Mihuc, Michael Wagner, and Morgan Sonderegger. Montreal forced aligner: Trainable text-speech alignment using kaldi. In Interspeech 2017, 2017." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 496, + 506, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 496, + 506, + 529 + ], + "spans": [ + { + "bbox": [ + 107, + 496, + 506, + 529 + ], + "type": "text", + "content": "[27] Shivam Mehta, Ambika Kirkland, Harm Lameris, Jonas Beskow, Éva Székely, and Gustav Eje Henter. Overflow: Putting flows on top of neural transducers for better tts. Interspeech 2023, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 539, + 506, + 572 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 539, + 506, + 572 + ], + "spans": [ + { + "bbox": [ + 107, + 539, + 506, + 572 + ], + "type": "text", + "content": "[28] Lingwei Meng, Long Zhou, Shujie Liu, Sanyuan Chen, Bing Han, Shujie Hu, Yanqing Liu, Jinyu Li, Sheng Zhao, Xixin Wu, et al. Autoregressive speech synthesis without vector quantization. CoRR, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 582, + 504, + 616 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 582, + 504, + 616 + ], + "spans": [ + { + "bbox": [ + 107, + 582, + 504, + 616 + ], + "type": "text", + "content": "[29] Nasrin Mostafazadeh, Nathanael Chambers, Xiaodong He, Devi Parikh, Dhruv Batra, Lucy Vanderwende, Pushmeet Kohli, and James Allen. A corpus and evaluation framework for deeper understanding of commonsense stories. Proceedings of NAACL-HLT, 2016." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 624, + 506, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 624, + 506, + 669 + ], + "spans": [ + { + "bbox": [ + 107, + 624, + 506, + 669 + ], + "type": "text", + "content": "[30] Eliya Nachmani, Alon Levkovitch, Roy Hirsch, Julian Salazar, Chulayuth Asawaroengchai, Soroosh Mariooryad, Ehud Rivlin, RJ Skerry-Ryan, and Michelle Tadmor Ramanovich. Spoken question answering and speech continuation using spectrogram-powered llm. 
In The Twelfth International Conference on Learning Representations, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 677, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 506, + 723 + ], + "type": "text", + "content": "[31] Tu Anh Nguyen, Maureen de Seyssel, Patricia Rozé, Morgane Rivière, Evgeny Kharitonov, Alexei Baevski, Ewan Dunbar, and Emmanuel Dupoux. The zero resource speech benchmark 2021: Metrics and baselines for unsupervised spoken language modeling. NeuRIPS Workshop on Self-Supervised Learning for Speech and Audio Processing, 2020." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 117 + ], + "type": "text", + "content": "[32] Tu Anh Nguyen, Eugene Kharitonov, Jade Copet, Yossi Adi, Wei-Ning Hsu, Ali Elkahky, Paden Tomasello, Robin Algayres, Benoit Sagot, Abdelrahman Mohamed, et al. Generative spoken dialogue language modeling. Transactions of the Association for Computational Linguistics, 2023." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 125, + 506, + 171 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 125, + 506, + 171 + ], + "spans": [ + { + "bbox": [ + 106, + 125, + 506, + 171 + ], + "type": "text", + "content": "[33] Tu Anh Nguyen, Benjamin Muller, Bokai Yu, Marta R Costa-Jussa, Maha Elbayad, Sravya Popuri, Christophe Ropers, Paul-Ambroise Duquenne, Robin Algayres, Ruslan Mavlyutov, et al. Spirit-lm: Interleaved spoken and written language model. Transactions of the Association for Computational Linguistics, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 178, + 504, + 214 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 178, + 504, + 214 + ], + "spans": [ + { + "bbox": [ + 106, + 178, + 504, + 214 + ], + "type": "text", + "content": "[34] Vassil Panayotov, Guoguo Chen, Daniel Povey, and Sanjeev Khudanpur. Librispeech: An asr corpus based on public domain audio books. In 2015 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2015." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 220, + 504, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 220, + 504, + 256 + ], + "spans": [ + { + "bbox": [ + 107, + 220, + 504, + 256 + ], + "type": "text", + "content": "[35] Alec Radford, Jong Wook Kim, Tao Xu, Greg Brockman, Christine McLeavey, and Ilya Sutskever. Robust speech recognition via large-scale weak supervision. In International conference on machine learning, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 262, + 504, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 262, + 504, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 262, + 504, + 308 + ], + "type": "text", + "content": "[36] Jeff Rasley, Samyam Rajbhandari, Olatunji Ruwase, and Yuxiong He. 
Deepspeed: System optimizations enable training deep learning models with over 100 billion parameters. In Proceedings of the 26th ACM SIGKDD international conference on knowledge discovery and data mining, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 316, + 504, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 316, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 107, + 316, + 504, + 351 + ], + "type": "text", + "content": "[37] Chandan KA Reddy, Vishak Gopal, and Ross Cutler. Dnsmos: A non-intrusive perceptual objective speech quality metric to evaluate noise suppressors. In ICASSP 2021-2021 IEEE International Conference on Acoustics, Speech and Signal Processing (ICASSP), 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 358, + 504, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 358, + 504, + 393 + ], + "spans": [ + { + "bbox": [ + 107, + 358, + 504, + 393 + ], + "type": "text", + "content": "[38] Takaaki Saeki, Detai Xin, Wataru Nakata, Tomoki Koriyama, Shinnosuke Takamichi, and Hiroshi Saruwatari. Utmos: Utokyo-sarulab system for voicemos challenge 2022. Interspeech 2022, 2022." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 401, + 504, + 437 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 401, + 504, + 437 + ], + "spans": [ + { + "bbox": [ + 107, + 401, + 504, + 437 + ], + "type": "text", + "content": "[39] Rico Sennrich, Barry Haddow, and Alexandra Birch. Neural machine translation of rare words with subword units. Proceedings of the 54th Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), 2016." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 444, + 506, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 444, + 506, + 468 + ], + "spans": [ + { + "bbox": [ + 107, + 444, + 506, + 468 + ], + "type": "text", + "content": "[40] B Series. Method for the subjective assessment of intermediate quality level of audio systems. International Telecommunication Union Radiocommunication Assembly, 2014." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 475, + 504, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 475, + 504, + 510 + ], + "spans": [ + { + "bbox": [ + 107, + 475, + 504, + 510 + ], + "type": "text", + "content": "[41] Hubert Siuzdak, Florian Grötschla, and Luca A Lanzendorfer. Snac: Multi-scale neural audio codec. Audio Imagination: NeurIPS 2024 Workshop AI-Driven Speech, Music, and Sound Generation, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 518, + 506, + 564 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 518, + 506, + 564 + ], + "spans": [ + { + "bbox": [ + 107, + 518, + 506, + 564 + ], + "type": "text", + "content": "[42] Hsiang-Sheng Tsai, Heng-Jui Chang, Wen-Chin Huang, Zili Huang, Kushal Lakhotia, Shu-wen Yang, Shuyan Dong, Andy T Liu, Cheng-I Jeff Lai, Jiatong Shi, et al. Superb-sg: Enhanced speech processing universal performance benchmark for semantic and generative capabilities. Proceedings of the 60th Annual Meeting of the Association for Computational Linguistics, 2022." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 571, + 506, + 605 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 571, + 506, + 605 + ], + "spans": [ + { + "bbox": [ + 107, + 571, + 506, + 605 + ], + "type": "text", + "content": "[43] Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, Lukasz Kaiser, and Illia Polosukhin. Attention is all you need. 
Advances in neural information processing systems, 2017." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 614, + 504, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 614, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 107, + 614, + 504, + 649 + ], + "type": "text", + "content": "[44] Apoorv Vyas, Bowen Shi, Matthew Le, Andros Tjandra, Yi-Chiao Wu, Baishan Guo, Jiemin Zhang, Xinyue Zhang, Robert Adkins, William Ngan, et al. Audiobox: Unified audio generation with natural language prompts. arXiv preprint arXiv:2312.15821, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 656, + 504, + 691 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 656, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 106, + 656, + 504, + 691 + ], + "type": "text", + "content": "[45] Chengyi Wang, Sanyuan Chen, Yu Wu, Ziqiang Zhang, Long Zhou, Shujie Liu, Zhuo Chen, Yanqing Liu, Huaming Wang, Jinyu Li, et al. Neural codec language models are zero-shot text to speech synthesizers. arXiv preprint arXiv:2301.02111, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 699, + 504, + 723 + ], + "type": "text", + "content": "[46] Zhifei Xie and Changqiao Wu. Mini-omni: Language models can hear, talk while thinking in streaming. arXiv preprint arXiv:2408.16725, 2024." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 258 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 117 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 117 + ], + "type": "text", + "content": "[47] Detai Xin, Xu Tan, Kai Shen, Zeqian Ju, Dongchao Yang, Yuancheng Wang, Shinnosuke Takamichi, Hiroshi Saruwatari, Shujie Liu, Jinyu Li, et al. Rall-e: Robust codec language modeling with chain-of-thought prompting for text-to-speech synthesis. arXiv preprint arXiv:2404.03204, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 124, + 504, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 124, + 504, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 124, + 504, + 158 + ], + "type": "text", + "content": "[48] Shu-wen Yang, Po-Han Chi, Yung-Sung Chuang, Cheng-I Jeff Lai, Kushal Lakhotia, Yist Y Lin, Andy T Liu, Jiatong Shi, Xuankai Chang, Guan-Ting Lin, et al. Superb: Speech processing universal performance benchmark. Interspeech 2021, 2021." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 164, + 506, + 198 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 164, + 506, + 198 + ], + "spans": [ + { + "bbox": [ + 106, + 164, + 506, + 198 + ], + "type": "text", + "content": "[49] Neil Zeghidour, Alejandro Luebs, Ahmed Omran, Jan Skoglund, and Marco Tagliasacchi. 
Soundstream: An end-to-end neural audio codec. IEEE/ACM Transactions on Audio, Speech, and Language Processing, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 205, + 504, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 504, + 228 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 504, + 228 + ], + "type": "text", + "content": "[50] Heiga Zen, Viet Dang, Rob Clark, Yu Zhang, Ron J Weiss, Ye Jia, Zhifeng Chen, and Yonghui Wu. Libritts: A corpus derived from librispeech for text-to-speech. Interspeech 2019, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 235, + 504, + 258 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 235, + 504, + 258 + ], + "spans": [ + { + "bbox": [ + 106, + 235, + 504, + 258 + ], + "type": "text", + "content": "[51] Xin Zhang, Dong Zhang, Shimin Li, Yaqian Zhou, and Xipeng Qiu. Speechtokenizer: Unified speech tokenizer for speech large language models. ICLR, 2024." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 391, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 391, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 391, + 85 + ], + "type": "text", + "content": "A Technical Appendices and Supplementary Material" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 95, + 194, + 106 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 95, + 194, + 106 + ], + "spans": [ + { + "bbox": [ + 105, + 95, + 194, + 106 + ], + "type": "text", + "content": "A.1 Related Work" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 115, + 506, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 115, + 506, + 203 + ], + "spans": [ + { + "bbox": [ + 104, + 115, + 506, + 203 + ], + "type": "text", + "content": "Recent SLMs often require speech tokenization to conduct language modeling with the next prediction objective as the text LLMs. Unlike text, the speech signal is continuous and lengthy, making it difficult to derive proper speech tokenization for spoken language modeling. Common approaches may utilize self-supervised learned (SSL) speech models followed by quantization techniques to extract speech tokens [1, 11, 14, 21, 33]. In addition, audio or speech codec models have also been used for tokenization in recent SLMs [5, 6, 49, 51]. These models are designed for resynthesis, where the speech decoders are jointly learned with the encoders, making them easy to use for spoken language modeling." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 208, + 506, + 298 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 208, + 506, + 298 + ], + "spans": [ + { + "bbox": [ + 104, + 208, + 506, + 298 + ], + "type": "text", + "content": "With speech tokenization, GSLM [21, 32] first demonstrates the possibility of building an SLM that can generate speech. TWIST [11] further shows that SLM can benefit from initialization with the text-pretrained LLM. With regard to the huge success of text-only LLMs, recent work shifts the focus towards joint speech-text modeling [6, 11, 46]. Challenged by the modality gap between speech and text tokens, different techniques are introduced to facilitate joint modeling. Spirit LM [33] adopts an interleaving strategy; moshi [6] trains its own tokenizer with a reduced token frequency. Moreover, different patterns and strategies such as delayed or sequential generation are introduced for joint modeling, aiming for more reasonable and coherent speech outputs [46]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 301, + 506, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 506, + 368 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 506, + 368 + ], + "type": "text", + "content": "Despite the increasing demand of joint speech-text modeling [6, 33, 46], we do not find any work discussing the effectiveness of current speech tokenization for it. Moreover, the speech token is often derived with speech or audio-only data2. Nonetheless, we observe that recent work is trying to mitigate the modality gap by reducing frequency speech token or conducting additional training stage for text-speech alignment. This motivates us to design a speech tokenization that is directly aligned with its text counterpart, tackling the mismatch issue during the tokenization stage." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 372, + 506, + 472 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 372, + 506, + 472 + ], + "spans": [ + { + "bbox": [ + 104, + 372, + 506, + 472 + ], + "type": "text", + "content": "In the main text, we have mentioned that we utilize a specialized mechanism based on attention to extract and aggregate the encoder representations. We clarify that the text-speech cross-attention mechanism has also been used for fine-grained control of text-to-speech synthesis (TTS). More specifically, Chen and Rudnicky [3] propose content-style cross-attention to indicate their text-speech cross-attention mechanism that enables style transfer in TTS. Although both utilize specialized text-speech cross-attention mechanism, the design choices and problem formulations are completely different. We attribute of our main novelty to inventing a text-aligned speech tokenization and embedding for joint spoken language modeling, and the text-speech cross attention mechanism is considered and shown to be a clean, effective, and straightforward way of achieving it." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 710, + 448, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 710, + 448, + 722 + ], + "spans": [ + { + "bbox": [ + 116, + 710, + 448, + 722 + ], + "type": "text", + "content": "2An exception is CosyVoice [7]. We discuss it in Section 2 since it is related to our method." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 282, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 282, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 282, + 84 + ], + "type": "text", + "content": "A.2 Tackling the Vocabulary Mismatch" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": "The vocabulary mismatch problem lies in the fact that the vocabulary sets are different between the ASR and the LLM, and TASTE is aligned with the text transcription tokens from ASR. 
Consider that given a text transcription " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": " and the vocabulary sets of ASR and LLM denoted as " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbb{V}^{\\mathrm{asr}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbb{V}^{\\mathrm{llm}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": ", the ASR tokenized sequence " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}^{\\mathrm{asr}} = [v_1^{\\mathrm{asr}}, v_2^{\\mathrm{asr}}, \\dots, v_N^{\\mathrm{asr}}]" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "v_i^{\\mathrm{asr}} \\in \\mathbb{V}^{\\mathrm{asr}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": " and the LLM tokenized sequence " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}^{\\mathrm{llm}} = [v_1^{\\mathrm{llm}}, v_2^{\\mathrm{llm}}, \\dots, v_M^{\\mathrm{llm}}]" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "v_i^{\\mathrm{llm}} \\in \\mathbb{V}^{\\mathrm{llm}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": " can be different in terms of token ids and sequence lengths. 
Since the TASTE token and embedding are aligned with " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}^{\\mathrm{asr}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": ", we need to derive a method to align them with " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}^{\\mathrm{llm}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": " for text-aligned speech-text modeling. Notice that " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}^{\\mathrm{asr}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}^{\\mathrm{llm}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": " both represent " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": ", we propose to mitigate the issue through word-level grouping, averaging, and aligning, detailed in Algorithm 1. By crafting TASTE speech tokenization into the word level, we are able to align it with the text tokens of the LLM, denoted as " + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{q}}, \\tilde{\\mathbf{z}}" + }, + { + "bbox": [ + 104, + 106, + 506, + 240 + ], + "type": "text", + "content": ". In practice, we also adopt the word-level averaging technique during the TASTE tokenization training phase, ensuring that the word-level TASTE tokenization facilitates high-quality reconstruction." 
+ } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 106, + 297, + 506, + 718 + ], + "blocks": [ + { + "bbox": [ + 106, + 284, + 440, + 296 + ], + "lines": [ + { + "bbox": [ + 106, + 284, + 440, + 296 + ], + "spans": [ + { + "bbox": [ + 106, + 284, + 440, + 296 + ], + "type": "text", + "content": "Algorithm 1 Aligning TASTE with LLM Tokenization via Word-Level Techniques" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "lines": [ + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "spans": [ + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": "1: Initialization: Text transcription " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\pmb{v} = [\\mathrm{word}_1,\\mathrm{word}_2,\\dots ,\\mathrm{word}_W]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " ASR tokens of the transcription " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{asr}} = [v_1^{\\mathrm{asr}},v_2^{\\mathrm{asr}},\\dots ,v_N^{\\mathrm{asr}}]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " TASTE embedding " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\hat{z} = [\\hat{z}_1,\\hat{z}_2,\\dots ,\\hat{z}_N]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " LLM tokens of the transcription " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{llm}} = [v_1^{\\mathrm{llm}},v_2^{\\mathrm{llm}},\\dots ,v_M^{\\mathrm{llm}}]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n2: procedure WORDLEVELGROUPING(v, " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": 
"v^{\\mathrm{asr}},\\hat{z},v^{\\mathrm{llm}})" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n3: Since " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{asr}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " is a token sequence represents " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\mathbf{v}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " we can easily group it by words: \n4: " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\pmb{v}_{\\mathrm{grouped}}^{\\mathrm{asr}}\\leftarrow \\underbrace{\\left[(v_{1}^{\\mathrm{asr}},v_{2}^{\\mathrm{asr}},v_{3}^{\\mathrm{asr}})_{1},(v_{4}^{\\mathrm{asr}})_{2},\\ldots,(v_{N - 1}^{\\mathrm{asr}},v_{N}^{\\mathrm{asr}})_{W}\\right]}_{\\text{word}_1}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " ▷Group " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{asr}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " by the words of " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\pmb{v}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n5: With the word-level grouping from " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{asr}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " , we can group TASTE embedding " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\hat{z}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " as well: \n6: " + }, + { + "bbox": [ + 106, + 
297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\hat{z}_{\\mathrm{grouped}}\\gets \\left[(\\hat{z}_1,\\hat{z}_2,\\hat{z}_3)_1,(\\hat{z}_4)_2,\\dots,(\\hat{z}_{N - 1},\\hat{z}_N)W\\right]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n7: Finally, we can group " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{llm}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " following the similar procedure of grouping " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{asr}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " . \n8: " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\pmb{v}_{\\mathrm{grouped}}^{\\mathrm{llm}}\\leftarrow \\underbrace{\\left[(v_{1}^{\\mathrm{llm}},v_{2}^{\\mathrm{llm}})_{1},(v_{3}^{\\mathrm{llm}},v_{4}^{\\mathrm{llm}})_{2},\\dots,(v_{M - 2}^{\\mathrm{llm}},v_{M - 1}^{\\mathrm{llm}},v_{M}^{\\mathrm{llm}})_{W}\\right]}_{\\text{word}_2}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n9: Due to the vocabulary mismatch, the grouping of " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{llm}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " is different from " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "v^{\\mathrm{asr}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " , " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\hat{z}_{\\mathrm{grouped}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n10: end procedure \n11: procedure 
WORDLEVELAVERAGING(" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\hat{z}_{\\mathrm{grouped}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": ") \n12: " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\bar{z}\\gets []" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " ▷ Initialize a new sequence \n13: for word group index " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "i\\gets 1" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " do \n14: word group " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "(\\hat{z}_j,\\dots,\\hat{z}_k)\\gets \\hat{z}_{\\mathrm{grouped}}[i]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n15: " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\bar{z}_{[j:k]}\\gets \\text{Average} ((\\hat{z}_j,\\dots,\\hat{z}_k))" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " ▷Average the word group \n16: append " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\bar{z}_{[j:k]}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\bar{z}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n17: end for \n18: Resulting in word-level TASTE embedding " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", 
+ "content": "\\bar{z}\\in \\mathbb{R}^{W\\times d_z}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " , where " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " is the word length of " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "V" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n19: end procedure \n20: procedure ALIGNWORDLEVELEMBEDGINGWITHLLM( " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\bar{z},v_{\\mathrm{grouped}}^{\\mathrm{llm}}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " ) \n21: " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\tilde{z}\\gets []" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " ▷ Initialize a new sequence \n22: for word group index " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "i\\gets 1" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "W" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " do \n23: word group " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "(v_j^{\\mathrm{llm}},\\dots,v_k^{\\mathrm{llm}})\\gets v_{\\mathrm{grouped}}[i]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n24: " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "M\\gets \\text{Length}((v_j^{\\mathrm{llm}},\\dots,v_k^{\\mathrm{llm}}))" + }, + { + "bbox": [ + 106, + 297, + 
506, + 718 + ], + "type": "text", + "content": " ▷Get the length of the word group. \n25: for " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "m\\gets 1" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "M" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " do ▷add " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "M\\times \\bar{z}[i]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " into the aligned sequence " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n26: append " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\tilde{z}[i]" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " \n27: end for \n28: end for \n29: end procedure \n30: return The LLM-aligned word-level TASTE embedding " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\tilde{z}" + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "text", + "content": " and its codes form " + }, + { + "bbox": [ + 106, + 297, + 506, + 718 + ], + "type": "inline_equation", + "content": "\\tilde{\\boldsymbol{q}}" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_body" + } + ], + "index": 3, + "sub_type": "algorithm" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 202, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 202, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 202, + 83 + ], + "type": "text", + "content": "A.3 Training Details" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 506, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 506, + 173 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 506, + 173 + ], + "type": "text", + "content": "We separate the training process into the two phases: deriving TASTE tokenization and conducting spoken language modeling with TASTE. In the tokenization phase, only the Aggregator, Quantizer, and the UnitDecoder is trainable. We use the Adam optimizer and the learning rate is set to 0.0016. The batch size is set to 160 seconds on each of the 8 NVIDIA A6000 GPUs we used. Note that in the first 2 epochs the quantization is not applied. From the beginning of the third epoch, quantization is applied and the Quantizer starts to be updated. We train the TASTE tokenizer for 5 epochs, which takes about 2 days for learning, with the learning rate gradually decayed." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 177, + 504, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 244 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 244 + ], + "type": "text", + "content": "As for the spoken language modeling training phase, we use the AdamW optimizer, the Consine scheduler with the learning rate set to 1e-5. We use 8 Nvidia A6000 GPUs for training. 
The total batch size summation over the GPUs is set to 768 samples with the gradient accumulation steps set to 2. To reduce the memory overhead and the computational cost, we employ bfloat16 mixed precision during training. Tools such as DeepSpeed [36] and Liger Kernel [13] are also applied to speed up the fine-tuning process." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 263, + 211, + 274 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 263, + 211, + 274 + ], + "spans": [ + { + "bbox": [ + 105, + 263, + 211, + 274 + ], + "type": "text", + "content": "A.4 Evaluation Details" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 285, + 221, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 285, + 221, + 297 + ], + "spans": [ + { + "bbox": [ + 105, + 285, + 221, + 297 + ], + "type": "text", + "content": "A.4.1 Human Evaluation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 308, + 504, + 340 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 308, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 308, + 504, + 340 + ], + "type": "text", + "content": "We conduct human listening tests through Amazon Mechanical Turk. In each experiment, we randomly select the same 20 samples from each method; and for each sample we collect more than 10 evaluation scores across different human evaluators." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 359, + 506, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 359, + 506, + 404 + ], + "spans": [ + { + "bbox": [ + 104, + 359, + 506, + 404 + ], + "type": "text", + "content": "MUSHRA In Table 1, we have shown our result of the MUSRHA human listening test [40]. Following [51], we conduct the evaluation with a hidden reference but without a lowerpass-filtered anchor. 
We instruct evaluators to rate the perceptual quality of the given samples with respect to the ground truth on a scale of 1 to 100." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 422, + 504, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 456 + ], + "type": "text", + "content": "Speech Continuation MOS In Table 2, we mention that we have conducted the human listening test to evaluate the overall performance of the speech continuations. Here, we present the instruction for human speech continuation MOS evaluation as follows:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 470, + 369, + 482 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 470, + 369, + 482 + ], + "spans": [ + { + "bbox": [ + 121, + 470, + 369, + 482 + ], + "type": "text", + "content": "Instruction for Human Speech Continuation MOS Evaluation" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 120, + 491, + 488, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 491, + 488, + 512 + ], + "spans": [ + { + "bbox": [ + 120, + 491, + 488, + 512 + ], + "type": "text", + "content": "In this test, each sample will contain a short audio clip called \"prompt\" (3 seconds) and a longer audio clip called \"prompt+continuation\" (" + }, + { + "bbox": [ + 120, + 491, + 488, + 512 + ], + "type": "inline_equation", + "content": "\\sim" + }, + { + "bbox": [ + 120, + 491, + 488, + 512 + ], + "type": "text", + "content": "15 seconds)." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 120, + 512, + 488, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 512, + 488, + 533 + ], + "spans": [ + { + "bbox": [ + 120, + 512, + 488, + 533 + ], + "type": "text", + "content": "You will be asked to rate the speech quality of the \"prompt+continuation\" audio clip, specifically focus on the \"continuation\" part." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 533, + 488, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 533, + 488, + 552 + ], + "spans": [ + { + "bbox": [ + 120, + 533, + 488, + 552 + ], + "type": "text", + "content": "The rating should be based on how likely you think that the long audio is a proper continuation of the \"prompt\" audio." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 120, + 552, + 347, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 552, + 347, + 563 + ], + "spans": [ + { + "bbox": [ + 120, + 552, + 347, + 563 + ], + "type": "text", + "content": "Specifically, the rating should be based on the following scale:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 120, + 582, + 400, + 632 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 120, + 582, + 354, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 582, + 354, + 592 + ], + "spans": [ + { + "bbox": [ + 120, + 582, + 354, + 592 + ], + "type": "text", + "content": "1: Bad - The \"continuation\" is not distinguishable or not natural." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 592, + 312, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 592, + 312, + 602 + ], + "spans": [ + { + "bbox": [ + 121, + 592, + 312, + 602 + ], + "type": "text", + "content": "2: Poor - The \"continuation\" is " + }, + { + "bbox": [ + 121, + 592, + 312, + 602 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 121, + 592, + 312, + 602 + ], + "type": "text", + "content": " distinguishable." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 602, + 351, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 602, + 351, + 612 + ], + "spans": [ + { + "bbox": [ + 121, + 602, + 351, + 612 + ], + "type": "text", + "content": "3: Fair - The \"continuation\" is " + }, + { + "bbox": [ + 121, + 602, + 351, + 612 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 121, + 602, + 351, + 612 + ], + "type": "text", + "content": " distinguishable and natural." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 612, + 357, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 612, + 357, + 622 + ], + "spans": [ + { + "bbox": [ + 121, + 612, + 357, + 622 + ], + "type": "text", + "content": "4: Good - The \"continuation\" is " + }, + { + "bbox": [ + 121, + 612, + 357, + 622 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 121, + 612, + 357, + 622 + ], + "type": "text", + "content": " distinguishable and natural." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 121, + 622, + 400, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 622, + 400, + 632 + ], + "spans": [ + { + "bbox": [ + 121, + 622, + 400, + 632 + ], + "type": "text", + "content": "5: Excellent - The \"continuation\" is distinguishable, meaningful, and natural." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 120, + 651, + 405, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 651, + 405, + 662 + ], + "spans": [ + { + "bbox": [ + 120, + 651, + 405, + 662 + ], + "type": "text", + "content": "Distinguishable means that the words in the \"continuation\" is distinguishable." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 120, + 662, + 488, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 662, + 488, + 682 + ], + "spans": [ + { + "bbox": [ + 120, + 662, + 488, + 682 + ], + "type": "text", + "content": "Natural means that the \"continuation\" sounds like a real human voice and a natural continuation of the prompt without considering the content of the speech." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 120, + 682, + 488, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 682, + 488, + 702 + ], + "spans": [ + { + "bbox": [ + 120, + 682, + 488, + 702 + ], + "type": "text", + "content": "Meaningful means that you can not only distinguish the words but also understand the meaning of the whole \"prompt+continuation\"." 
+ } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 263, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 263, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 263, + 83 + ], + "type": "text", + "content": "A.4.2 GPT-4o for MOS Evaluation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 91, + 504, + 146 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 91, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 104, + 91, + 504, + 146 + ], + "type": "text", + "content": "As introduced in Section 4.2.1, we use GPT-4o to assign MOS scores to the speech continuation results. Here, we describe the detailed procedure. First, whisper-large-v3 is applied to transcribe the generated speech. Then, given the transcription, the text content from the prompt audio, and the instruction template, GPT-4o can produce a score between 1 and 5. 
The instruction template is provided below:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 155, + 316, + 167 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 155, + 316, + 167 + ], + "spans": [ + { + "bbox": [ + 121, + 155, + 316, + 167 + ], + "type": "text", + "content": "Instruction Prompt for GPT-4o MOS Evaluation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 176, + 471, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 176, + 471, + 207 + ], + "spans": [ + { + "bbox": [ + 120, + 176, + 471, + 207 + ], + "type": "text", + "content": "The task is evaluating the relevance and likelihood of the predicted text continuation, given the text prompt. You should also consider whether the meaning of the text continuation is making sense. The text prompt is:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 207, + 170, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 207, + 170, + 217 + ], + "spans": [ + { + "bbox": [ + 121, + 207, + 170, + 217 + ], + "type": "text", + "content": "{\"prompt\"}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 122, + 217, + 273, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 217, + 273, + 225 + ], + "spans": [ + { + "bbox": [ + 122, + 217, + 273, + 225 + ], + "type": "text", + "content": ", and the text continuation is :" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 122, + 226, + 175, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 226, + 175, + 236 + ], + "spans": [ + { + "bbox": [ + 122, + 226, + 175, + 236 + ], + "type": "text", + "content": "{\"content\"}" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 246, + 462, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 246, + 462, + 266 + ], + "spans": [ + { + "bbox": [ + 121, + 246, + 462, + 266 + ], + "type": "text", + "content": "You must 
give an overall rating from 1 to 5. The rating guideline is as below:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 266, + 483, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 266, + 483, + 277 + ], + "spans": [ + { + "bbox": [ + 121, + 266, + 483, + 277 + ], + "type": "text", + "content": "1: The text continuation is very unlikely and irrelevant to the text prompt." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 277, + 466, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 277, + 466, + 297 + ], + "spans": [ + { + "bbox": [ + 121, + 277, + 466, + 297 + ], + "type": "text", + "content": "2: The text continuation is unlikely and marginally relevant to the text prompt." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 297, + 457, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 297, + 457, + 316 + ], + "spans": [ + { + "bbox": [ + 121, + 297, + 457, + 316 + ], + "type": "text", + "content": "3: The text continuation is moderately likely and relevant to the text prompt." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 317, + 441, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 317, + 441, + 327 + ], + "spans": [ + { + "bbox": [ + 121, + 317, + 441, + 327 + ], + "type": "text", + "content": "4: The text continuation is likely and relevant to the text prompt." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 327, + 408, + 336 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 327, + 408, + 336 + ], + "spans": [ + { + "bbox": [ + 121, + 327, + 408, + 336 + ], + "type": "text", + "content": "5: The text continuation is very likely and highly relevant." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 337, + 389, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 337, + 389, + 346 + ], + "spans": [ + { + "bbox": [ + 121, + 337, + 389, + 346 + ], + "type": "text", + "content": "You should take the following steps to provide the score:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 347, + 408, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 347, + 408, + 356 + ], + "spans": [ + { + "bbox": [ + 121, + 347, + 408, + 356 + ], + "type": "text", + "content": "First: briefly analyze the sample with the above definition." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 121, + 357, + 457, + 366 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 357, + 457, + 366 + ], + "spans": [ + { + "bbox": [ + 121, + 357, + 457, + 366 + ], + "type": "text", + "content": "Second: MUST follow the output format as: I would rate the score as _" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 396, + 212, + 407 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 212, + 407 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 212, + 407 + ], + "type": "text", + "content": "A.5 Additional Results" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 416, + 297, + 429 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 416, + 297, + 429 + ], + "spans": [ + { + "bbox": [ + 105, + 416, + 297, + 429 + ], + "type": "text", + "content": "A.5.1 Details on SALMON and StoryCloze" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 436, + 504, + 459 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 436, + 504, + 459 + ], + "spans": [ + { + "bbox": [ + 104, + 436, + 504, + 459 + ], + "type": "text", + "content": "Our detailed results on SALMON and StoryCloze are reported in Table 4. 
The introductions of the two benchmarks—SALMON and StoryCloze—are described below." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 470, + 504, + 537 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 470, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 104, + 470, + 504, + 537 + ], + "type": "text", + "content": "SALMON for Acoustic Evaluation SALMON offers a comprehensive set of metrics designed to evaluate SLMs in multiple dimensions. In summary, each test sample consists of a positive sample and a negative sample. The negative sample differs from the positive sample by having some segments altered. These alterations include changes in speaker, gender, environment (e.g., room acoustics), or sentiment in the middle of the utterance. The SLM serves as an anomaly detector that aims to distinguish between the pairs of positive and negative samples. The distinction is based on" + } + ] + } + ], + "index": 19 + }, + { + "type": "table", + "bbox": [ + 106, + 606, + 504, + 719 + ], + "blocks": [ + { + "bbox": [ + 104, + 562, + 504, + 606 + ], + "lines": [ + { + "bbox": [ + 104, + 562, + 504, + 606 + ], + "spans": [ + { + "bbox": [ + 104, + 562, + 504, + 606 + ], + "type": "text", + "content": "Table 4: The evaluation results on SALMON and StoryCloze of different SLMs, and BG means background. We report likelihood-based accuracy on SALMON (acoustic aspect) and StoryCloze (semantic aspect). The baseline (S3 token) is conducted by joint speech-text modeling with the S3 token as speech tokenization." + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 606, + 504, + 719 + ], + "lines": [ + { + "bbox": [ + 106, + 606, + 504, + 719 + ], + "spans": [ + { + "bbox": [ + 106, + 606, + 504, + 719 + ], + "type": "table", + "html": "
METHODLoRASALMON (ACOUSTIC CONSISTENCY)STORYCLOZE
SentimentSpeakerGenderRoomBG (domain)BG (rand.)sSC / tSC
Previous Work
TWIST 1.3B ([11])X61.5±3.469.0±3.369.5±3.359.0±3.555.5±3.560.5±3.552.4±0.8 / 70.6±0.7
TWIST 7B ([11])X61.5±3.471.0±3.270.0±3.262.0±3.455.5±3.560.5±3.555.3±0.8 / 74.1±0.7
Spirit LM ([33])X54.5±3.569.5±3.367.0±3.354.5±3.553.5±3.555.5±3.561.0±0.8 / 82.9±0.6
Spirit LM Expr. ([33])X73.5±3.181.0±2.885.0±2.554.5±3.556.0±3.564.0±3.456.9±0.8 / 75.4±0.7
Ours
Baseline (S3 token)49.5±3.548.8±3.548.8±3.549.5±3.555.3±3.549.5±3.554.4±0.8 / 63.0±0.8
TASLM 1B (token)59.0±3.568.0±3.370.5±3.261.0±3.452.0±3.554.0±3.564.2±0.8 / 88.9±0.5
TASLM 1B (embedding)57.5±3.567.0±3.375.5±3.050.0±3.547.0±3.549.0±3.564.0±0.8 / 89.5±0.5
", + "image_path": "082bb6aa5d9a46479ef69e67a1c255233b2ead7447bd21827e559bc7215a53a7.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "table_body" + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "the likelihood score given by each SLM, which is then evaluated with the overall precision between the ground truth and the prediction." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 506, + 184 + ], + "type": "text", + "content": "StoryCloze for Semantic Evaluation To evaluate the SLMs' ability to comprehend semantic coherence and logical reasoning, we employ the spoken version of StoryCloze test (sSC) and the Topic StoryCloze test (tSC) assembled by ([11]). Assessment of narrative understanding involves presenting a four-sentence story setup, followed by two possible endings. These tasks require the model to select the most appropriate conclusion, thereby testing its grasp of causal and temporal relationships within a narrative. Similarly to SALMON, we measure the accuracy of the distinctions based on the likelihood scores." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 195, + 272, + 208 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 195, + 272, + 208 + ], + "spans": [ + { + "bbox": [ + 105, + 195, + 272, + 208 + ], + "type": "text", + "content": "A.5.2 Report of Standard Deviations" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 213, + 478, + 227 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 478, + 227 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 478, + 227 + ], + "type": "text", + "content": "We report the standard deviations of our tables in the main text to allow further investigation." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 106, + 253, + 504, + 384 + ], + "blocks": [ + { + "bbox": [ + 194, + 243, + 415, + 253 + ], + "lines": [ + { + "bbox": [ + 194, + 243, + 415, + 253 + ], + "spans": [ + { + "bbox": [ + 194, + 243, + 415, + 253 + ], + "type": "text", + "content": "Table 5: Results with standard deviations of Table 1" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 253, + 504, + 384 + ], + "lines": [ + { + "bbox": [ + 106, + 253, + 504, + 384 + ], + "spans": [ + { + "bbox": [ + 106, + 253, + 504, + 384 + ], + "type": "table", + "html": "
MethodBitrateQUALITYSIMILARITY
WER ↓UTMOSDNSMOSViSQOLDrtn. Con.Spkr. Sim.MUSHRA
Ground Truth256k2.1%±0.074.09±0.323.84±0.26---76.6±15.9
Encodec [5]15005.1%±0.111.58±0.343.26±0.243.46±0.280.94±0.0030.63±0.10-
30002.6%±0.082.35±0.533.48±0.253.81±0.270.96±0.0020.78±0.0725.6±18.6
SpeechTokenizer [51]5005.2%±0.111.27±0.052.99±0.172.80±0.240.94±0.0030.35±0.09-
20003.0%±0.083.56±0.433.60±0.283.65±0.220.97±0.0020.80±0.0653.9±22.9
40002.5%±0.083.90±0.363.76±0.284.03±0.170.98±0.0020.92±0.04-
Mimi [6]10003.1%±0.093.60±0.373.60±0.303.62±0.260.96±0.0020.82±0.0667.6±19.8
S3 token (topline) [7]6003.0%±0.094.18±0.273.90±0.243.30±0.260.96±0.0020.82±0.0970.2±17.0
Text-only (baseline)~505.9%±0.114.31±0.164.11±0.222.44±0.230.57±0.0060.78±0.0942.6±27.1
TASTE (ours)~1504.4%±0.114.29±0.184.10±0.223.05±0.260.91±0.0030.80±0.0868.3±17.1
", + "image_path": "c37da4e246327bdeb5e0364e3b1c15c3f51d38c47be643ae90b43b1c67a0812a.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + }, + { + "type": "table", + "bbox": [ + 106, + 418, + 504, + 548 + ], + "blocks": [ + { + "bbox": [ + 194, + 408, + 416, + 418 + ], + "lines": [ + { + "bbox": [ + 194, + 408, + 416, + 418 + ], + "spans": [ + { + "bbox": [ + 194, + 408, + 416, + 418 + ], + "type": "text", + "content": "Table 6: Results with standard deviations of Table 2." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 106, + 418, + 504, + 548 + ], + "lines": [ + { + "bbox": [ + 106, + 418, + 504, + 548 + ], + "spans": [ + { + "bbox": [ + 106, + 418, + 504, + 548 + ], + "type": "table", + "html": "
MethodFinetuned / base parametersCONTINUATIONLIKELIHOOD
GPT-4oUTMOSHumanSALMONStoryClozeOverall
Cascade
Cascade (LLaMA3.2-1Bα)-3.15±1.274.25±0.224.00±1.28---
Cascade (LLaMA2-7Bβ)-3.43±1.274.25±0.253.98±1.29---
Spoken LMs
TWIST 1.3B [11]1.3B / 1.3Bθ1.48±0.703.25±0.481.95±1.0162.5±1.461.5±0.562.0±0.7
TWIST 7B [11]7B / 7Bγ1.44±0.703.27±0.522.04±0.9163.4±1.464.7±0.564.1±0.7
Spirit LM [33]7B / 7Bβ2.79±1.063.41±0.192.38±0.8159.1±1.472.0±0.565.6±0.7
Spirit LM Expr. [33]7B / 7Bβ1.90±1.033.40±0.302.41±0.9669.0±1.366.2±0.567.6±0.7
Baseline (S3 token)45M / 1.3Bα1.37±0.874.04±0.272.84±1.1150.2±1.458.7±0.654.5±0.8
TASLM 1B (token)45M / 1.3Bα3.08±1.374.07±0.283.93±1.3060.8±1.476.5±0.568.7±0.7
TASLM 1B (embed.)45M / 1.3Bα3.16±1.334.22±0.214.16±1.2057.7±1.476.7±0.567.2±0.7
", + "image_path": "1b178f39d74600bee181610c32c18474ca8adc9c9bfb2aa49ac9649a63e89cf4.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "lines": [ + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "spans": [ + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "text", + "content": "Base models: " + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "text", + "content": " LLaMA3.2-1B, " + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "text", + "content": " LLaMA2-7B, " + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "text", + "content": " LLaMA-7B, " + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 109, + 549, + 294, + 557 + ], + "type": "text", + "content": " OPT-1.3B" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 197, + 594, + 414, + 696 + ], + "blocks": [ + { + "bbox": [ + 194, + 582, + 416, + 593 + ], + "lines": [ + { + "bbox": [ + 194, + 582, + 416, + 593 + ], + "spans": [ + { + "bbox": [ + 194, + 582, + 416, + 593 + ], + "type": "text", + "content": "Table 7: Results with standard deviations of Table 3." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 197, + 594, + 414, + 696 + ], + "lines": [ + { + "bbox": [ + 197, + 594, + 414, + 696 + ], + "spans": [ + { + "bbox": [ + 197, + 594, + 414, + 696 + ], + "type": "table", + "html": "
MethodModeWeb Q.LLaMA-Q.
Mini-Omni 0.5B(T→T)T21.3±0.939.0±2.8
Mini-Omni 0.5B [46]T+A4.5±0.511.6±1.8
Helium 7B (text)T32.3±1.075.0±2.5
Moshi 7B [6]T+A26.6±1.062.3±2.8
LLaMA3.1-8B-InstructT60.4±1.171.7±2.6
Llama-Omni-8B [9]T+A35.5±1.167.3±2.7
LLaMA3.2-1B†T24.0±0.951.0±2.9
TASLM 1B (embed.)†T+A27.1±1.057.6±2.9
", + "image_path": "1a8ccabf79ad97b3b3d47d64ea800c7d4361d2c185e0e4c15151ce2ffc4f4b4e.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 200, + 697, + 374, + 708 + ], + "lines": [ + { + "bbox": [ + 200, + 697, + 374, + 708 + ], + "spans": [ + { + "bbox": [ + 200, + 697, + 374, + 708 + ], + "type": "text", + "content": "We apply few-shot learning to facilitate question answering." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_content_list.json b/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..38f8b23c39d7e462ac0bf8eac0db8ba3006ea8c9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_content_list.json @@ -0,0 +1,5471 @@ +[ + { + "type": "text", + "text": "SkillWeaver: Web Agents can Self-Improve by Discovering and Honing Skills", + "text_level": 1, + "bbox": [ + 171, + 98, + 825, + 140 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Boyuan Zheng $^{1*}$ , Michael Y. 
Fatemi $^{2*}$ , Xiaolong Jin $^{3*}$ , Zora Zhiruo Wang $^{4}$ , Apurva Gandhi $^{4}$ , Yueqi Song $^{4}$ , Yu Gu $^{1}$ , Jayanth Srinivasa $^{5}$ , Gaowen Liu $^{5}$ , Graham Neubig $^{4}$ , Yu Su $^{1}$", + "bbox": [ + 181, + 165, + 887, + 213 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 The Ohio State University 2 University of Virginia 3 Purdue University", + "bbox": [ + 228, + 218, + 782, + 237 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4 Carnegie Mellon University 5 Cisco Research", + "bbox": [ + 330, + 234, + 683, + 251 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{zheng.2372, su.809}@osu.edu", + "bbox": [ + 419, + 251, + 620, + 265 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 457, + 300, + 537, + 316 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To survive and thrive in complex environments, humans have evolved sophisticated self-improvement mechanisms through environment exploration, hierarchical abstraction of experiences into reusable skills, and collaborative construction of an ever-growing skill repertoire. Despite recent advancements, autonomous web agents still lack crucial self-improvement capabilities, struggling with procedural knowledge abstraction, skill refinement, and skill composition. In this work, we introduce SKILLWEAKER, a skill-centric framework that enables agents to self-improve by autonomously synthesizing reusable skills as APIs. Given a new website, the agent autonomously discovers skills, executes them for practice, and distills practice experiences into robust APIs. Iterative exploration continuously expands a library of lightweight, plug-and-play APIs, significantly enhancing the agent's capabilities. Experiments on WebArena and real-world websites demonstrate the efficacy of SKILLWEAKER, achieving relative success rate improvements of $31.8\\%$ and $39.8\\%$ , respectively. 
Additionally, APIs synthesized by strong agents substantially enhance weaker agents through transferable skills, yielding improvements of up to $54.3\\%$ on WebArena. These results demonstrate the effectiveness of honing diverse website interactions into APIs, which can be seamlessly shared among various web agents. $^{1}$", + "bbox": [ + 228, + 332, + 767, + 613 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 638, + 318, + 654 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "AI agents based on large language models (LLMs) that can browse the web (Deng et al., 2023; Zhou et al., 2024a; Zheng et al., 2024) or use computers (Xie et al., 2024) like humans are rapidly rising as a new frontier of AI research and application. Despite these promising opportunities, digital environments present substantial challenges due to their inherent complexity and diversity. Website environments are highly intricate, consisting of numerous interactive elements that create large action spaces. An even greater challenge lies in developing generalist web agents capable of generalizing to out-of-distribution task types and adapting to novel websites. Existing efforts have attempted to train web agents using large-scale trajectory datasets collected across diverse websites and task types (Li et al., 2024; Pahuja et al., 2025). However, these agents often struggle with overfitting to specific website structures and task distributions (Li et al., 2024; Zheng et al., 2024), reducing their ability to handle previously unseen environments effectively.", + "bbox": [ + 169, + 670, + 826, + 839 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "As an integral feat of human intelligence, self-improvement presents a promising solution to these challenges. Imagine a user visiting 'yelp.com' for the first time. 
In the beginning, she may not be familiar with the layout and functionalities provided by the website, so she", + "bbox": [ + 169, + 844, + 826, + 887 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.07079v1 [cs.AI] 9 Apr 2025", + "bbox": [ + 22, + 276, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal Contribution. Work done during Michael and Xiaolong's internship at OSU NLP Group.", + "bbox": [ + 189, + 895, + 815, + 910 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1All resources are available at https://github.com/OSU-NLP-Group/SkillWeaver.", + "bbox": [ + 192, + 910, + 718, + 922 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 948, + 503, + 959 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9be3566820824df0fb4afd10c6f034a3ea92106e97fbd989b5dc6eee49f0c717.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 99, + 344, + 191 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/19d15d6e7c216d3b323d7090dd8d06675893fb6ed06369fc58e17dcdccabb40d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 348, + 99, + 598, + 191 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/9f663991fd933a1bc51c4398502876b99a36e912bf48aed268974b20402c9bc7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 99, + 812, + 191 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/a1cb53812cb12cd2115f6f9801c2edca9d085c54977c62ea4020be2c8ad80798.jpg", + "image_caption": [ + "Figure 1: An overview of the SKILLWEAKER framework. The Skill Proposal module (Stage I) identifies novel skills to practice based on observations of the environment and available APIs in the skill library. For each proposed skill, the agent executes it to generate trajectories, which are later evaluated by the reward model. 
If successful, the trajectory is utilized to synthesize an API (Stage II). To ensure robustness, the synthesized API undergoes testing with automatically generated test cases and debugging within the Skill Honing module (Stage III)." + ], + "image_footnote": [], + "bbox": [ + 173, + 199, + 344, + 271 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/4da1709e76cf9c3e41c7e70097e78dfad4e24e0974eb255b1fc58e7459842f3d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 346, + 199, + 596, + 271 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/ac4b87be6827fc0b422426f9eb8310b6c709f5521c1d80c66dd9b72bf4771fbe.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 599, + 199, + 808, + 271 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "would explore the website and think about what tasks can be done. As she does more tasks on the website (e.g., searching for restaurants with various filters), common routines become memorized procedural knowledge (e.g., searching for high-end Korean restaurants entails three steps: type 'Korean' in the top search bar, click the 'Search' button shaped like a magnifier, and click the $$$$$' button on the search result page). Such routines abstracted from experiences essentially become high-level actions or skills that can be effortlessly summoned and carried out instead of re-learning things on the fly; they can even be composed into more complex routines, e.g., making a reservation at a restaurant involves first finding it. 
Similarly, through self-improvement, agents can build conceptual maps of the website environment, accumulate procedural knowledge as skills, compose simple skills into more complex ones, and leverage these learned skills to enhance their decision-making processes.", + "bbox": [ + 169, + 409, + 826, + 566 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A few recent studies have tried to capture some facets of humans' self-improvement capability. Traditional methods typically store skills implicitly through action trajectories, primarily leveraging them as demonstrations for in-context learning (Murty et al., 2024b) or fine-tuning (Murty et al., 2024a; Su et al., 2025; Pahuja et al., 2025). Although these trajectory-based approaches can be effective, they struggle to explicitly abstract reusable procedural knowledge, resulting in heavy training demands and limited generalization to new websites and tasks. Furthermore, continuously updating models with new trajectories introduces significant concerns such as catastrophic forgetting and sensitivity to website changes. Additionally, storing and sharing extensive memory-intensive trajectory data also poses practical challenges for knowledge transfer among agents. Efforts like Agent Workflow Memory (Wang et al., 2024e) and ICAL (Sarch et al., 2024) take this a step further by generating abstract, reusable routines. However, its natural language-based routines pose challenges for formal verification and precise composition into new workflows. Both methods also focus on online learning with access to test queries or offline learning requiring high-quality annotated demonstrations. 
Instead, we focus on more challenging autonomous exploration where agents propose novel tasks without external supervision.", + "bbox": [ + 169, + 569, + 826, + 796 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To this end, we propose SKILLWEAKER, a skill-centric framework that enables web agents to autonomously self-improve by exploring website environments and synthesizing reusable, structured skills (§ 2). Our framework leverages the ability of LLMs to iteratively curate APIs based on feedback from the interactive environment (Wang et al., 2023a; 2024d). Specifically, SKILLWEAKER comprises a three-stage pipeline (Figure 1): (1) systematic exploration of website environments to identify potential skills, (2) practicing and converting these skills", + "bbox": [ + 169, + 800, + 828, + 887 + ], + "page_idx": 1 + }, + { + "type": "page_footnote", + "text": "For example, screenshot and HTML files consume approximately 0.3 GB per trajectory with an average of 7.3 actions in Mind2Web (Deng et al., 2023), making them hard to transfer on the fly.", + "bbox": [ + 169, + 896, + 823, + 925 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 491, + 946, + 504, + 959 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "into robust, reusable APIs, and (3) testing and debugging to ensure API reliability during inference.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Results on WebArena (Zhou et al., 2024a) demonstrate substantial performance improvements after equipping web agents with APIs synthesized by SKILLWEAVER. We observe relative gains in success rate from $25\\%$ to $38\\%$ with just 160 iterations of the three-stage pipeline. Further experiments with weaker agents revealed even more dramatic improvements, ranging from $40\\%$ to $130\\%$ . 
These results underscore two insights: (1) synthesized APIs can substantially enhance agent performance, and (2) weaker agents particularly benefit from APIs generated by more powerful counterparts, effectively distilling advanced knowledge into plug-and-play modules. To further validate the practicality of our method, we conduct experiments on real-world websites using tasks sampled from Online-Mind2Web (Xue et al., 2025; Deng et al., 2023). SKILLWEAVER achieved a $39.8\\%$ relative improvement in success rate, demonstrating its effectiveness in complex, dynamic web environments.", + "bbox": [ + 169, + 138, + 826, + 292 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 SkillWeaver", + "text_level": 1, + "bbox": [ + 171, + 316, + 316, + 333 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Our objective is to develop a framework that enables web agents to autonomously discover skills and construct a continuously expanding skill library in the form of APIs. In our work, \"API\" refers to a Python function containing Playwright code for a browser automation, rather than a (for example) REST API that handles HTTP requests. 
As illustrated in Figure 1, the exploration pipeline consists of three key modules: (1) Skill Proposal: Identifying new skills (e.g., a short description such as \"Identify pill using pill identifier\") for the agent to practice, facilitating the continuous discovery of relevant functionalities; (2) Skill Synthesis: Practicing the proposed skills and converting practiced trajectories into reusable Python functions, encapsulating learned behaviors into structured APIs; (3) Skill Honing: Testing and debugging the synthesized APIs using feedback from the environment and the reward model to ensure reliability.", + "bbox": [ + 169, + 351, + 826, + 505 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Stage I: Skill Proposal", + "text_level": 1, + "bbox": [ + 171, + 526, + 377, + 542 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Website environments encompass numerous underlying functionalities that require systematic exploration. LLMs have been employed as automatic curriculum (Tajwar et al., 2025) for exploring open-ended environments, like Minecraft (Wang et al., 2023a; Du et al., 2023), TextWorld (Song et al., 2024a), and household settings (Du et al., 2023). Similarly, we leverage LLMs as an automatic curriculum for discovering website functionalities, leveraging their internet-scale knowledge and HTML interpretation capabilities. To facilitate the understanding of website environments, we provide LLMs with detailed webpage observations, including screenshots, website names, URLs, and accessibility trees. 
Accessibility trees are particularly useful for understanding less familiar websites, such as CMS in WebArena, because they provide comprehensive structural and functional information.", + "bbox": [ + 169, + 554, + 828, + 695 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In contrast to existing approaches (Zhou et al., 2024b; Murty et al., 2024b;a; Song et al., 2024a), we emphasize skill diversity by explicitly prompting LLMs to propose novel and reusable skills beyond the current skill repertoire. To ensure efficient exploration and high success rates in subsequent skill practice and API synthesis stages, we specifically target short-horizon, reusable skills that can be completed within a single API call. Our approach follows a curriculum progression from simple to complex skill compositions. The LLM is instructed to propose the following three types of tasks (Prompts in Appendix § A.1):", + "bbox": [ + 169, + 700, + 825, + 800 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Procedural Tasks. Procedural tasks require a sequence of atomic actions to achieve higher-level process automation objectives. Each procedural task corresponds to a workflow comprising actions that can be generalized to complete similar tasks. For example, the task identifying pills based on imprint and color involves a workflow where values are entered into textboxes, followed by clicking a submit button to finalize the search operation.", + "bbox": [ + 169, + 804, + 826, + 876 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Navigational Tasks. Navigational tasks involve systematically exploring various sections or pages within a website. Through these tasks, agents can construct conceptual maps that capture the functionality of various webpages within the website. 
Examples include", + "bbox": [ + 169, + 881, + 825, + 925 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "navigating to the \"customer reviews\" section on a product management website and accessing individual user profiles.", + "bbox": [ + 169, + 103, + 823, + 132 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Information-Seeking Tasks. Information-seeking tasks involve scraping detailed data from webpages (e.g., extracting all commits from a GitHub repository). Enumerating all available items on webpages, such as comprehensive lists of reviews or faculty members, is typically challenging and requires extensive, repetitive actions. To address this challenge, generating specialized scraping APIs enables agents to efficiently retrieve all relevant information.", + "bbox": [ + 169, + 138, + 823, + 209 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "2.2 Stage II: Skill Synthesis", + "text_level": 1, + "bbox": [ + 171, + 234, + 392, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This module aims at automatically generating robust and reusable APIs, consisting of the following three components: Skill Practice, Reward Model, and API Synthesis. A critical challenge in this process is ensuring the robustness of the generated APIs, as each individual module may introduce errors. We address this challenge through curriculum progression—advancing from simple to complex skill compositions—which improves accuracy since modules perform better on simpler tasks requiring fewer steps. Additionally, the subsequent Skill Honing module (Stage III) validates APIs through unit testing and debugging to ensure reliability.", + "bbox": [ + 169, + 263, + 826, + 377 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Skill Practice. Given a task proposed in Stage I, the agent takes actions to complete it. 
For procedural and navigational tasks, we leverage a base agent to generate actions to repeatedly attempt to complete tasks. The agent repeatedly attempts to complete the tasks proposed in the previous section, using the reward model to determine successful completion. For information-seeking tasks, we synthesize data extraction code to gather target information from webpages, incorporating any necessary navigation steps, such as retrieving historical orders or customer information.", + "bbox": [ + 169, + 381, + 823, + 479 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Reward Model. LLMs have demonstrated effectiveness as evaluators of correctness, both for answer accuracy (Zheng et al., 2023) and for determining whether action sequences successfully complete task instructions (Pan et al., 2024; Zhuge et al., 2024). We prompt an LLM to provide reward signals indicating task completion success with the following observations in the context: (1) Task description, (2) Action trajectory comprising a sequence of actions with screenshots and corresponding descriptions, (3) Environmental feedback, including code execution outcomes and observable website changes. Further details can be found in Appendix § A.5.", + "bbox": [ + 169, + 486, + 826, + 599 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "API Synthesis. This module encapsulates the agent's actions from successful trajectories into a reusable and generalized API. This is performed by creating a string representation of each of the state-action pairs in the trajectory and prompting the language model to generate a Python implementation. We statically analyze the function for common generation mistakes, as described in Appendix S A.6, and if any are detected, we prompt the model to generate the function again. 
As illustrated in Figure 2, each API includes a Python implementation (including a function signature, docstring, and code body), where the docstring particularly contains a usage log recording previous executions of the function and a description of the prerequisite state", + "bbox": [ + 169, + 604, + 421, + 925 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/2b099ceaf2b9e84281aa8e1a61245f4853f487f25721e12edf8c65d1e63b7264.jpg", + "image_caption": [ + "Figure 2: An example of a synthesized API from Drugs.com used to identify pills based on their characteristics." + ], + "image_footnote": [], + "bbox": [ + 428, + 607, + 828, + 881 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "of the website for the function to execute without errors.", + "bbox": [ + 171, + 103, + 419, + 132 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.3 Stage III: Skill Honing", + "text_level": 1, + "bbox": [ + 171, + 154, + 382, + 171 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Despite significant efforts within the above modules to ensure the robustness of the synthesized API, it is not always guaranteed. To address this limitation, we integrate a stage to test and debug synthesized APIs. For APIs requiring no extra parameters except the by default Playwright page instance, we execute the API directly as a standalone unit test. For APIs requiring additional parameters, we leverage the LLM to generate appropriate parameter values that serve as comprehensive test cases.", + "bbox": [ + 169, + 183, + 826, + 268 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3 Experiments", + "text_level": 1, + "bbox": [ + 171, + 292, + 318, + 311 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We conducted experiments on both WebArena (Zhou et al., 2024a) and real-world live websites to evaluate our agent. 
WebArena is a widely used benchmark for web agent evaluation, providing a diverse set of websites designed to simulate real-world web interactions. Real-world websites further demonstrate the performance of our method in more diverse, dynamic, and complex website environments. We perform exploration on each website environment separately to derive the API library, as described in Section § 3.4.", + "bbox": [ + 169, + 327, + 826, + 412 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1 WebArena", + "text_level": 1, + "bbox": [ + 171, + 434, + 292, + 449 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "WebArena is a self-hostable, sandboxed web environment designed for developing and evaluating web agents. It simulates real-world websites across five common application domains: e-commerce (Shopping), social forums (Reddit), collaborative software development (Gitlab), content management (CMS), and navigation (Map). This benchmark includes a total of 812 tasks, each with automated evaluation metrics based on functional correctness. WebArena provides a stable testing environment with reliable automatic evaluation, making it a good platform for demonstrating the efficacy of our agent and analyzing various aspects of our approach. Further details on the WebArena benchmark used in our evaluation can be found in Appendix § B.", + "bbox": [ + 169, + 462, + 826, + 589 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Human-Crafted APIs. The self-hosted WebArena sandbox environment grants access to its source code and administrative access for the simulated websites. This enables us to extract APIs that adhere to standardized protocols (e.g., REST) from the official documentation within the source code as well as from external sources (Song et al., 2024a). 
We further leverage these human-crafted official APIs to compare them against the APIs synthesized by SKILLWEAVER, demonstrating the quality of the generated APIs.", + "bbox": [ + 169, + 594, + 826, + 679 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.2 Real-World Websites", + "text_level": 1, + "bbox": [ + 171, + 700, + 370, + 715 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Real-world websites present greater complexity and richness than simulated environments. To demonstrate the practicality and efficacy of our approach in real-world environments, we conduct evaluations on live websites. Specifically, we use Online-Mind2Web (Xue et al., 2025; Deng et al., 2023), a benchmark specifically designed for evaluating web agents on live websites. This benchmark encompasses 300 tasks across 136 popular websites spanning diverse domains. The tasks represent realistic scenarios commonly encountered in daily web interactions, as proposed by human annotators. Considering the cost of exploration on all the websites, we only consider websites with a sufficient number of tasks that can be accessed by Playwright. We end up with 4 websites that have at least 8 tasks to run online evaluations (Zheng et al., 2024; Yoran et al., 2024; He et al., 2024) and include 57 tasks, listed in Appendix § C. To ensure the validity of evaluation, we manually evaluate the success of agent trajectories. Specifically, we verify whether the actions fulfill all requirements posed by the task and whether retrieved information matches the target for information-seeking tasks.", + "bbox": [ + 169, + 729, + 828, + 924 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Agent Implementation", + "text_level": 1, + "bbox": [ + 171, + 103, + 382, + 119 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "By default, we use GPT-4o3 with a temperature of 0.3. 
Following the default WebArena evaluation settings, we set the maximum number of steps per iteration to 10.", + "bbox": [ + 169, + 132, + 823, + 162 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Baseline Agent. We implement a web agent with only browsing actions based on Code-Act (Wang et al., 2024b), which leverages an LLM to generate Python code using the Playwright browser automation library to interact with web environments through simple atomic actions such as click, type, and scroll. The observation space consists of webpage screenshots, an accessibility tree, and the execution results from previous actions, such as exceptions.", + "bbox": [ + 169, + 167, + 826, + 253 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Agent with Skills. To demonstrate the performance improvements gained from APIs, we extend the action space of the baseline agent by incorporating synthesized skills, enabling the agent to execute API calls. We leverage this agent to evaluate the performance improvements from both APIs synthesized by SKILLWEAVER and human-crafted APIs (Song et al., 2024b) from WebArena's official documentation. Given the extensive number of synthesized APIs, we introduce an API selection module that filters only relevant APIs from the API library. This module also removes APIs that do not satisfy pre-conditions. The prompt and skill serialization format can be found in Appendix § A.8.", + "bbox": [ + 169, + 258, + 826, + 371 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Weaker Agent. 
To evaluate whether the synthesized APIs can plug and play in weaker agents, we implement a weaker variant by replacing GPT-4o with GPT-4o-mini4 while maintaining the same agent design.", + "bbox": [ + 169, + 376, + 825, + 422 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.4 Exploration", + "text_level": 1, + "bbox": [ + 171, + 441, + 302, + 458 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "During the exploration stage, we employ the agent described above to practice skills. Each website undergoes an exploration process consisting of 160 iterations with GPT-4o, where an iteration is defined as either attempting a proposed skill or testing an existing skill. Within each iteration, the agent proposes either a procedural or navigational task at the starting webpage. It can also propose an information-seeking task at the ending webpage after completing the procedural or navigational task in the previous iteration. During exploration, the agent can access APIs synthesized in the previous exploration steps after skill selection to compose more complex APIs.", + "bbox": [ + 169, + 468, + 826, + 582 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Results and Analysis", + "text_level": 1, + "bbox": [ + 171, + 603, + 395, + 622 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Experimental Results", + "text_level": 1, + "bbox": [ + 171, + 637, + 372, + 652 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "WebArena Results. Our experiments on WebArena demonstrate consistent performance improvements with the integration of synthesized APIs. 
As illustrated in Table 1, we observe a substantial relative improvement in success rate, $39.8\\%$ on average, for the baseline agent with GPT-4o and an even larger improvement of $54.3\\%$ with GPT-4o-mini across the evaluated websites.", + "bbox": [ + 169, + 662, + 823, + 734 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "When compared to AutoEval (Pan et al., 2024), which leverages an LLM-based reward model to guide inference-time exploration, SKILLWEAKER achieves higher average success rates and exhibits better or comparable performance across all domains, with the exception of the Shopping environment. The reason behind this exception is the inherent requirements for more extensive interaction with such websites for partially observable information, like dynamic product search results and product details. In comparison with SteP (Sodhi et al., 2024), which incorporates the external memory of domain-specific human-written workflows, SKILLWEAKER achieves better performance on CMS and Map environments, showing the promising quality of agent synthesized APIs even comparing with manually crafted workflows designed by human experts.", + "bbox": [ + 169, + 739, + 826, + 881 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "3Azure Endpoint: gpt-4o-2024-08-06", + "bbox": [ + 189, + 893, + 439, + 910 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "4Azure Endpoint: gpt-4o-mini-2024-07-18", + "bbox": [ + 192, + 910, + 475, + 922 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e8e603d1e36a974a635e695df504cf8dda0a3650221c82712c73a2e047a0c081.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
MethodGitlabMapShoppingCMSRedditAVG.
WebArena15.015.613.910.46.612.3
AutoEval25.027.539.620.920.826.9
*SteP32.030.037.024.059.033.0
SKILLWEAVER
GPT-4o17.827.519.818.737.722.6
+ Skills22.233.927.225.850.029.8
Δ↑25%↑23%↑38%↑38%↑33%↑32%
GPT-4o-mini6.110.311.83.318.99.2
+ Skills8.916.717.17.726.414.1
Δ↑46%↑62%↑46%↑133%↑40%↑45%
", + "bbox": [ + 282, + 107, + 715, + 276 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/f15aa8a60c451bf19aa9f97d46d05672dafcd4435585dc9510373dfcb676f191.jpg", + "table_caption": [ + "Table 1: Task success rate on WebArena. The numbers in green represent the relative improvement percentage." + ], + "table_footnote": [], + "table_body": "
MethodDrugFlightCookingCarAVG.
Baseline65.011.762.511.140.2
+ Skills87.029.475.011.156.2
Δ↑34%↑151%↑20%↑0%↑40%
", + "bbox": [ + 313, + 330, + 683, + 398 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Success rate in real-world website evaluation based on Online-Mind2Web.", + "bbox": [ + 197, + 409, + 797, + 422 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Live Website Results. As shown in Table 2, integrating synthesized APIs leads to an average relative improvement of $39.8\\%$ in success rate across four websites. For the Car website, while the final success rate remained unchanged after incorporating synthesized APIs, we observed that in four out of nine tasks, the synthesized APIs successfully guided the agent to the final state required for task completion. However, the agent still encountered failures during the final steps that demanded strong environment understanding and visual reasoning.", + "bbox": [ + 169, + 449, + 826, + 549 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Analysis", + "text_level": 1, + "bbox": [ + 171, + 568, + 279, + 584 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Generalization across Website. Generalization across diverse websites, particularly less common websites, remains a significant challenge. SKILLWEAVER presents a new angle to this challenge by integrating website-specific skills and knowledge through external memory in synthesized skill files. SKILLWEAVER is able to encapsulate skills and knowledge on website environments into code files containing accumulated skills.", + "bbox": [ + 169, + 595, + 823, + 667 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To demonstrate the viability of such generalization, we don't include any website specific in the agent and adapt the agent to different websites by purely integrating synthesized API libraries generated during pre-hoc website explorations. 
As shown in both Table 1 and Table 2, our agent is able to achieve stable performance improvements not only across all 5 WebArena websites but also on the 4 real-world live websites that are more complex and noisy.
These results highlight the transferability of our synthesized APIs as a plug-and-play module to enhance agents.", + "bbox": [ + 169, + 103, + 823, + 133 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As the base agents become stronger, particularly those capable of more sophisticated inference-time search (e.g., Operator), the potential of SKILLWEAVER can be further unleashed. While current agents can accomplish challenging tasks through trial-and-error (Song et al., 2024a) and search over webpages Koh et al. (2024), certain complex skills remain beyond their capabilities. For example, as illustrated in Appendix D.2.3, the task request quote_for_multiple listings requires repeated, sequential, successful searches for specific vehicles, followed by separate quote requests. This process demands both long-range planning and back-tracing capabilities, which present significant challenges for existing agents. As agents grow stronger, SKILLWEAVER will enable the synthesis of increasingly sophisticated and complex skills.", + "bbox": [ + 169, + 138, + 826, + 280 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Comparison with Human-Crafted APIs. We further conducted a comparison of the performance of agent with synthesized skills with the human-crafted APIs extracted from the official documentation and outsourcing websites (Song et al., 2024a). Based on the level of API support, websites are classified into three categories: low, medium, and high.", + "bbox": [ + 169, + 284, + 826, + 342 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "As shown in Figure 3, the performance of the synthesized APIs is comparable with human-crafted APIs on websites with low API support, such as Reddit, and those with medium API support, like Shopping. This result suggests that our exploration process is capable of generating APIs with quality that is comparable to or even superior to manually crafted APIs found in official documentation. 
On websites with high API support, such as GitLab and Maps, the performance of the synthesized APIs is not as good.", + "bbox": [ + 169, + 347, + 398, + 599 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/d73876740ac36fe6ed2add813b290f0783025673f627d1b0da9c0f205484f2f1.jpg", + "image_caption": [ + "Figure 3: Success rate with synthesized vs. human-crafted APIs." + ], + "image_footnote": [], + "bbox": [ + 406, + 352, + 823, + 563 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Case Studies", + "text_level": 1, + "bbox": [ + 171, + 623, + 308, + 636 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Emergence of Compositional APIs. After a certain number of exploration iterations, we observe that the pipeline begins to generate compositional APIs that call multiple simpler APIs. An example is shown in § D.3. This API is responsible for applying multiple filters to search results that involves sequentially calling another API to close a webpage overlay, followed by two additional APIs to refine the search. This emergent behavior indicates an increasing level of abstraction and efficiency in SKILLWEAKER's ability to synthesize and integrate discovered skills.", + "bbox": [ + 169, + 652, + 826, + 751 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitation in Skill Use. LLMs like GPT-4o are still not robust enough at API calling, and that hurts API-augmented agents like ours, even with human-crafted APIs. This challenge is even greater for weaker LLMs such as GPT-4o-mini, as illustrated in Table 1. We identify two primary categories of failures: (1) failure to identify the appropriate API and (2) generating wrong parameters. For example, SKILLWEAKER synthesized APIs to augment agents to search recipes in Cookpad.com. As demonstrated in Appendix D.2.4, the LLM is not able to identify the search Recipes_by_cuisine_type('hamburger') API to finish the task \"Save a hamburger recipe\". 
In Appendix D.2.5, the LLM successfully identifies the right API search_recipes_by_ingredients(page, 'ingredients') to complete the task \"Browse recipes for gluten-free chocolate chip cookies that can be made without nuts\".
The World-model-augmented web agent (Chae et al., 2024) and WebDreamer (Gu et al., 2024b) integrate LLM-based world models to anticipate the outcomes of its actions.", + "bbox": [ + 174, + 133, + 828, + 357 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Tool Use and Synthesis. With the great potentials exhibited by tool-augmented LMs (Schick et al., 2023; Wang et al., 2024c), many works explore using LLMs to make tools across math reasoning (Cai et al., 2024; Qian et al., 2023; Yuan et al., 2024a), theorem proving (Wang et al., 2024a), structured data analysis (Lu et al., 2023; Wang et al., 2024d), and digital agent automation (Wang et al., 2023a; Gu et al., 2024a; Song et al., 2024b). However, existing methods rely on a set of existing training examples to determine tool correctness (Yuan et al., 2024a), or user input queries to bootstrap the tool-making system (Wang et al., 2024d). Our work does not necessitate annotated data and enables agents to gather experiences by self-exploring the environment while synthesizing more skills.", + "bbox": [ + 169, + 362, + 826, + 491 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Self-Improvement. Many works explore collecting trajectories through experience, rating the success with a reward model, and using the results for policy training (Patel et al., 2024; Huang et al., 2023; Madaan et al., 2023; Wang et al., 2023b; Singh et al., 2024; Chen et al., 2024; Yuan et al., 2024b). These approaches can be broadly categorized into parametric and non-parametric approaches. Parametric training with exploration trajectories has been investigated using various strategies, such as supervised fine-tuning (e.g., WebGUM (Furuta et al., 2024), Patel et al. (2024), and ScribeAgent (Shen et al., 2024)) and reinforcement learning-style optimization (e.g., ETO (Song et al., 2024a) and PAE (Zhou et al., 2024b)). 
Non-parametric methods typically transform exploration trajectories into reusable APIs or workflows that can be composed into more complex skills, like Voyager (Wang et al., 2023a) an LLM-based lifelong learning agent that continually explores the Minecraft environment. Friday (Wu et al., 2024) also investigates self-improvement on operating systems through exploration with pre-defined learning objectives. For web agents, the collected trajectories can be used as examples for in-context learning (Murty et al., 2024b), provide targets for fine-tuning (Furuta et al., 2024; Patel et al., 2024; Song et al., 2024a), and be distilled into text instructions to reproduce the desired behavior (workflows) (Wang et al., 2024e). Our work focuses on enabling web agents to continuously expand their skill repertoire by autonomously exploring website environments and distilling successful trajectories into reusable skills represented as Python APIs. These APIs are created to enable the agent to operate more efficiently during subsequent exploration and task execution.", + "bbox": [ + 174, + 494, + 828, + 776 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 792, + 308, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Web agents aim to automate browsing tasks to enhance human productivity across diverse digital environments. A key challenge lies in adapting to real-world websites characterized by high diversity and complexity. Inspired by the self-improvement mechanism of humans, we propose SKILLWEAYER, a skill-centric framework that enables web agents to autonomously self-improve through exploration and API synthesis. Experimental results demonstrate substantial performance improvements, with relative success rate gains of $31.8\\%$ on WebArena benchmark and $39.8\\%$ on real-world websites. 
Notably, weaker agents", + "bbox": [ + 169, + 825, + 828, + 926 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 948, + 504, + 959 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "equipped with skills from more powerful counterparts showed improvements of up to $54.3\\%$ , demonstrating effective knowledge distillation through lightweight, transferable modules. Our findings point to a promising future where web agents can learn to improve themselves without parameter tuning by evolving a collective skill base.", + "bbox": [ + 169, + 103, + 823, + 161 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Acknowledgments", + "text_level": 1, + "bbox": [ + 171, + 181, + 346, + 200 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We would like to extend our appreciation to colleagues from the OSU NLP group and Anqi Feng for their insightful comments. This work is supported by a gift from Cisco.", + "bbox": [ + 169, + 214, + 823, + 244 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Ethics Statement", + "text_level": 1, + "bbox": [ + 171, + 265, + 326, + 282 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Generalist web agents hold the potential to automate routine web tasks, enhance user experiences, and promote web accessibility, safety concerns related to their real-world deployment are also critical. These concerns span privacy issues, such as access to users' personal profiles, and sensitive operations, such as financial transactions or application form submissions. During online evaluation, we noticed the possibility of these web agents generating harmful actions on the web, and we manually validated the safety of all the actions before execution. Exploration on live website may lead to potential safety concerns about trigger harmful actions. 
In case the agent is being used on a live website, we add safety instructions which guardrail the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users. It is critical for further research to thoroughly assess and mitigate the safety risks associated with web agents, ensuring they are safeguarded against producing and executing harmful actions. The code will also be released solely for research purposes, with the goal of making the web more accessible via language technologies under an OPEN-RAIL license. We are strongly against any potentially harmful use of the data or technology by any party.", + "bbox": [ + 169, + 297, + 826, + 522 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 542, + 274, + 558 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=qV83K9d5WB.", + "Hyungjoo Chae, Namyoung Kim, Kai Tzu iunn Ong, Minju Gwak, Gwanwoo Song, Ji-hoon Kim, Sunghwan Kim, Dongha Lee, and Jinyoung Yeo. Web agents with world models: Learning and leveraging environment dynamics in web navigation. ArXiv, abs/2410.13232, 2024. URL https://api-semanticscholar.org/CorpusID:273404026.", + "Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play finetuning converts weak language models to strong language models. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. 
URL https://openreview.net/forum?id=04cHTxW9BS.", + "Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing GUI grounding for advanced visual GUI agents. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 9313-9332. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.ACL-LONG.505. URL https://doi.org/10.18653/v1/2024.acl-long.505.", + "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Samual Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine" + ], + "bbox": [ + 171, + 566, + 828, + 925 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "(eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitnet/5950bf290a1570ea401bf98882128160-Abstract-Datasets_and_Benchmarks.html.", + "Yuqing Du, Olivia Watkins, Zihan Wang, Cedric Colas, Trevor Darrell, Pieter Abbeel, Abhishek Gupta, and Jacob Andreas. Guiding pretraining in reinforcement learning with large language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 8657-8677. PMLR, 2023. 
URL https://proceedings.mlr.press/v202/du23f.html.", + "Hiroki Furuta, Kuang-Huei Lee, Ofir Nachum, Yutaka Matsuo, Aleksandra Faust, Shixiang Shane Gu, and Izzeddin Gur. Multimodal web navigation with instructionfinetuned foundation models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=efFmBWioSc.", + "Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024.", + "Yu Gu, Yiheng Shu, Hao Yu, Xiao Liu, Yuxiao Dong, Jie Tang, Jayanth Srinivasa, Hugo Latapie, and Yu Su. Middleware for llms: Tools are instrumental for language agents in complex environments. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 7646-7663. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.436.", + "Yu Gu, Kai Zhang, Yuting Ning, Boyuan Zheng, Boyu Gou, Tianci Xue, Cheng Chang, Sanjari Srivastava, Yanan Xie, Peng Qi, Huan Sun, and Yu Su. Is your llm secretly a world model of the internet? model-based planning for web agents, 2024b. URL https://arxiv.org/abs/2411.06559.", + "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 6864-6890. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.ACL-LONG.371. 
URL https://doi.org/10.18653/v1/2024.acl-long.371.", + "Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14281-14290, 2024.", + "Jiaxin Huang, Shixiang Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pp. 1051-1068. Association for Computational Linguistics, 2023. doi: 10.18653/V1/2023.EMNLP-MAIN.67. URL https://doi.org/10.18653/v1/2023.emnlp-main.67.", + "Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. CoRR, abs/2407.01476, 2024. doi: 10.48550/ARXIV.2407.01476. URL https://doi.org/10.48550/arXiv.2407.01476.", + "Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, et al. Autowebglm: A large language" + ], + "bbox": [ + 169, + 102, + 828, + 926 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "model-based web navigating agent. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pp. 5295-5306, 2024.", + "Wei Li, William E. Bishop, Alice Li, Christopher Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. On the effects of data scale on UI control agents. In Amir Globersons, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. 
Tomczak, and Cheng Zhang (eds.), Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024, 2024. URL http://papers.nips.cc/paper_files/paper/2024/bitstream/a79f3ef3b445fd4659f44648f7ea8ffd-Abstract-Datasets_and_Benchmarks_Track.html.", + "Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/black/871ed095b734818cfba48db6aab25a62-Abstract-Conference.html.", + "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bit/91edff07232fb1b55a505a9e9f6c0ff3-Abstract-Conference.html.", + "Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator. CoRR, abs/2410.02907, 2024a. doi: 10.48550/ARXIV.2410.02907. URL https://doi.org/10.48550/arXiv.2410.02907.", + "Shikhar Murty, Christopher D. Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. 
BAGEL: bootstrapping agents by guiding exploration with language. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=VsvfSMI5bs.", + "Vardaan Pahuja, Yadong Lu, Corby Rosset, Boyu Gou, Arindam Mitra, Spencer Whitehead, Yu Su, and Ahmed Awadallah. Explorer: Scaling exploration-driven web trajectory synthesis for multimodal web agents. 2025. URL https://api-semanticscholar.org/ CorpusID:276408442.", + "Jiayi Pan, Yichi Zhang, Nicholas Tomlin, Yifei Zhou, Sergey Levine, and Alane Suhr. Autonomous evaluation and refinement of digital agents. CoRR, abs/2404.06474, 2024. doi: 10.48550/ARXIV.2404.06474. URL https://doi.org/10.48550/arXiv.2404.06474.", + "Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. CoRR, abs/2405.20309, 2024. doi: 10.48550/ARXIV.2405.20309. URL https://doi.org/10.48550/arXiv.2405.20309.", + "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent Q: advanced reasoning and learning for autonomous AI agents. CoRR, abs/2408.07199, 2024. doi: 10.48550/ARXIV.2408.07199. URL https://doi.org/10.48550/arXiv.2408.07199.", + "Cheng Qian, Chi Han, Yi Fung, Yujia Qin, Zhiyuan Liu, and Heng Ji. CREATOR: Tool creation for disentangling abstract and concrete reasoning of large language models. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. URL https://openreview.net/forum?id=aCHq10rQiH." + ], + "bbox": [ + 171, + 103, + 826, + 922 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Gabriel Sarch, Lawrence Jang, Michael J. Tarr, William W. 
Cohen, Kenneth Marino, and Katerina Fragkiadaki. Ical: Continual learning of multimodal agents by transforming trajectories into actionable insights. ArXiv, abs/2406.14596, 2024. URL https://api.sementicscholar.org/CorpusID:274466462.", + "Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/d842425e4bf79ba039352da0f658a906-Abstract-Conference.html.", + "Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/6c52a8a4fadc9129c6e1d1745f2dfd0f-Abstract-Conference.html.", + "Junhong Shen, Atishay Jain, Zedian Xiao, Ishan Amlekar, Mouad Hadji, Aaron Podolny, and Ameet Talwalkar. Scribeagent: Towards specialized web agents using production-scale workflow data. ArXiv, abs/2411.15004, 2024. URL https://api(semanticscholar.org/ CorpusID:274192657.", + "Avi Singh, John D. Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J. Liu, James Harrison, Jaehoon Lee, Kelvin Xu, Aaron T. Parisi, Abhishek Kumar, Alexander A. 
Alemi, Alex Rizkowsky, Azade Nova, Ben Adlam, Bernd Bohnet, Gamaleldin Fathy Elsayed, Hanie Sedghi, Igor Mordatch, Isabelle Simpson, Izzeddin Gur, Jasper Snoek, Jeffrey Pennington, Jiri Hron, Kathleen Kenealy, Kevin Swersky, Kshitteej Mahajan, Laura Culp, Lechao Xiao, Maxwell L. Bileschi, Noah Constant, Roman Novak, Rosanne Liu, Tris Warkentin, Yundi Qian, Yamini Bansal, Ethan Dyer, Behnam Neyshabur, Jascha Sohl-Dickstein, and Noah Fiedel. Beyond human data: Scaling self-training for problem-solving with language models. Trans. Mach. Learn. Res., 2024, 2024. URL https://openreview.net/forum?id=1NAyUngGFK.", + "Paloma Sodhi, S. R. K. Branavan, Yoav Artzi, and Ryan McDonald. Step: Stacked llm policies for web actions, 2024. URL https://arxiv.org/abs/2310.03720.", + "Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for LLM agents. CoRR, abs/2403.02502, 2024a. doi: 10.48550/ARXIV.2403.02502. URL https://doi.org/10.48550/arXiv.2403.02502.", + "Yueqi Song, Frank F. Xu, Shuyan Zhou, and Graham Neubig. Beyond browsing: Api-based web agents. ArXiv, abs/2410.16464, 2024b. URL https://api-semanticscholar.org/ CorpusID:273507298.", + "Hongjin Su, Ruoxi Sun, Jinsung Yoon, Pengcheng Yin, Tao Yu, and Sercan Ö. Arik. Learn-by-interact: A data-centric framework for self-adaptive agents in realistic environments. CoRR, abs/2501.10893, 2025. doi: 10.48550/ARXIV.2501.10893. URL https://doi.org/10.48550/arXiv.2501.10893.", + "Fahim Tajwar, Yiding Jiang, Abitha Thankaraj, Sumaita Sadia Rahman, J. Zico Kolter, Jeff Schneider, and Ruslan Salakhutdinov. Training a generally curious agent. CoRR, abs/2502.17543, 2025. doi: 10.48550/ARXIV.2502.17543. URL https://doi.org/10.48550/arXiv.2502.17543.", + "Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi (Jim) Fan, and Anima Anandkumar. 
Voyager: An open-ended embodied agent" + ], + "bbox": [ + 171, + 102, + 830, + 926 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "with large language models. Trans. Mach. Learn. Res., 2024, 2023a. URL https://api.sementicscholar.org/CorpusID:258887849.", + "Haiming Wang, Huajian Xin, Chuanyang Zheng, Zhengying Liu, Qingxing Cao, Yinya Huang, Jing Xiong, Han Shi, Enze Xie, Jian Yin, Zhenguo Li, and Xiaodan Liang. LEGO: Neural theorem proving with growing libraries. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3f5PALef5B.", + "Xingyao Wang, Yangyi Chen, Lifan Yuan, Yizhe Zhang, Yunzhu Li, Hao Peng, and Heng Ji. Executable code actions elicit better LLM agents. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=jJ9BoXAfFa.", + "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Anna Rogers, Jordan L. Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pp. 13484-13508. Association for Computational Linguistics, 2023b. doi: 10.18653/V1/2023.ACL-LONG.754. URL https://doi.org/10.18653/v1/2023.acl-long.754.", + "Zhiruo Wang, Zhoujun Cheng, Hao Zhu, Daniel Fried, and Graham Neubig. What are tools anyway? a survey from the language model perspective. In First Conference on Language Modeling, 2024c. URL https://openreview.net/forum?id=Xh1B90iBSR.", + "Zhiruo Wang, Graham Neubig, and Daniel Fried. 
TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. In *Forty-first International Conference on Machine Learning*, 2024d. URL https://openreview.net/forum?id=DCNCwaMJjI.", + "Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024e.", + "Zhiyong Wu, Chengcheng Han, Zichen Ding, Zhenmin Weng, Zhoumianze Liu, Shunyu Yao, Tao Yu, and Lingpeng Kong. Os-copilot: Towards generalist computer agents with self-improvement. CoRR, abs/2402.07456, 2024. doi: 10.48550/ARXIV.2402.07456. URL https://doi.org/10.48550/arXiv.2402.07456.", + "Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, Yitao Liu, Yiheng Xu, Shuyan Zhou, Silvio Savarese, Caiming Xiong, Victor Zhong, and Tao Yu. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. CoRR, abs/2404.07972, 2024. doi: 10.48550/ARXIV.2404.07972. URL https://doi.org/10.48550/arXiv.2404.07972.", + "Tianci Xue, Weijian Qi, Tianneng Shi, Chan Hee Song, Boyu Gou, Dawn Song, Huan Sun, and Yu Su. An illusion of progress? assessing the current state of web agents. OSU NLP Blog, Mar 2025. URL https://tinyurl.com/online-mind2web-blog.", + "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/bit/82ad13ec01f9fe44c01cb91814fd7b8c-Abstract-Conference.htm1.", + "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R. Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. 
In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=WE_vluYUL-X." + ], + "bbox": [ + 173, + 103, + 826, + 924 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Boin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? In Conference on Empirical Methods in Natural Language Processing, 2024. URL https://api_semanticscholar.org/CorpusID:271328691.", + "Keen You, Haotian Zhang, Eldon Schoop, Floris Weers, Amanda Swearngin, Jeffrey Nichols, Yinfei Yang, and Zhe Gan. Ferret-ui: Grounded mobile UI understanding with multimodal llms. In Ales Leonardis, Elisa Ricci, Stefan Roth, Olga Russakovsky, Torsten Sattler, and Gül Varol (eds.), Computer Vision - ECCV 2024 - 18th European Conference, Milan, Italy, September 29-October 4, 2024, Proceedings, Part LXIV, volume 15122 of Lecture Notes in Computer Science, pp. 240-255. Springer, 2024. doi: 10.1007/978-3-031-73039-9\\_.14. URL https://doi.org/10.1007/978-3-031-73039-9_14.", + "Lifan Yuan, Yangyi Chen, Xingyao Wang, Yi Fung, Hao Peng, and Heng Ji. CRAFT: Customizing LLMs by creating and retrieving from specialized toolsets. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=G0vdDSt9XM.", + "Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=ØNphYCmgua.", + "Yao Zhang, Zijian Ma, Yunpu Ma, Zhen Han, Yu Wu, and Volker Tresp. 
Webpilot: A versatile and autonomous multi-agent system for web task execution with strategic exploration. CoRR, abs/2408.15978, 2024. doi: 10.48550/ARXIV.2408.15978. URL https://doi.org/10.48550/arXiv.2408.15978.", + "Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(isdiction) is a generalist web agent, if grounded. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=piecKJ2D1B.", + "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023.", + "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx.", + "Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Li Erran Li. Proposer-agent-evaluator(pae): Autonomous skill discovery for foundation model internet agents. CoRR, abs/2412.13194, 2024b. doi: 10.48550/ARXIV.2412.13194. URL https://doi.org/10.48550/arXiv.2412.13194.", + "Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, et al. Agent-as-a-judge: Evaluate agents with agents. arXiv preprint arXiv:2410.10934, 2024." 
+ ], + "bbox": [ + 173, + 102, + 826, + 789 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 946, + 506, + 959 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Table of Content:", + "text_level": 1, + "bbox": [ + 171, + 103, + 303, + 118 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "- Appendix A: Prompts", + "bbox": [ + 215, + 125, + 393, + 140 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Appendix A.1: Skill Proposal", + "- Appendix A.2: Code Generation Agent Context Representation", + "- Appendix A.3: Code Generation Agent Error Detection", + "- Appendix A.4: Action Synthesis", + "- Appendix A.5: Success Checking", + "- Appendix A.6: Persisting Attempt to Knowledge Base", + "- Appendix A.7: Scraping Prompts" + ], + "bbox": [ + 246, + 142, + 722, + 255 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Appendix B: WebArena Benchmark", + "- Appendix C: Real World Website Tasks", + "- Appendix D: Example APIs" + ], + "bbox": [ + 215, + 257, + 514, + 299 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Appendix D.1: Success Cases", + "- Appendix D.2: Failure Cases", + "- Appendix D.3: Composition API." 
+ ], + "bbox": [ + 246, + 301, + 506, + 348 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A Prompts", + "text_level": 1, + "bbox": [ + 171, + 102, + 287, + 119 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.1 Skill Proposal", + "text_level": 1, + "bbox": [ + 171, + 133, + 325, + 152 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In case the agent is being used on a live website, we add safety instructions which bias the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users.", + "bbox": [ + 169, + 161, + 826, + 205 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Proposing Tasks to Explore", + "text_level": 1, + "bbox": [ + 197, + 215, + 398, + 232 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "System: You propose tasks that would make good 'tools' for external users of a website. User:", + "bbox": [ + 197, + 243, + 802, + 260 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You are a 'web agent' who is learning how to use a website. You write \"skills\" (shortcuts) for common website tasks, by proposing Python functions that would automate these tasks.", + "bbox": [ + 197, + 263, + 756, + 305 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You have already proposed the following skills: \n \n{procedural knowledge} \n", + "bbox": [ + 197, + 314, + 612, + 367 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You have built up the following knowledge about the website (in addition to the current screenshot): {semantic_knowledge} ", + "bbox": [ + 197, + 377, + 754, + 443 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Now please come up with something new to learn how to do on this website. 
The website is structured according to the following accessibility tree hierarchy: {ax_tree} ", + "bbox": [ + 197, + 453, + 769, + 542 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Do not interact with the Advanced Reporting tab if you are using \nMagenta. \nDo not interact with login/login/user accounts on any site. \nIf you're on OpenStreetMap, don't interact with community featur", + "bbox": [ + 197, + 553, + 764, + 606 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Write a list of useful skills/shortcuts that you would want to have built into a website as Python functions. Write the name in natural language format. Do not use $*\\_id$ as a parameter in your skill. Again, your goal is to generate functions that would be useful \"shortcuts\" for users of the website, so you should prioritize generating skills that compress a couple interactions into a single function call. Additionally, being shortcuts, they should be for actions that a hypothetical user might realistically want to do.", + "bbox": [ + 197, + 616, + 792, + 732 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Then, estimate:", + "text_level": 1, + "bbox": [ + 197, + 742, + 331, + 755 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "(1) how useful they are (5 being difficult and frequency, 1 being trivial or uncommon),", + "(2) the expected number of clicking/typing actions required to complete the skill. 
(calculate this by writing the list of steps and counting AFTERWARDs)" + ], + "bbox": [ + 200, + 756, + 795, + 819 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Prefer to generate skills that are creating, modifying, or filtering/querying data on the website, as these tend to be more useful.", + "bbox": [ + 199, + 830, + 797, + 868 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Do not generate skills simply to perform single clicks.", + "bbox": [ + 197, + 869, + 681, + 883 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "{safetyInstructions}", + "bbox": [ + 199, + 893, + 385, + 909 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Then, calculate the sum of these ratings for each skill. Finally, select the skill with the highest rating.", + "bbox": [ + 197, + 125, + 781, + 152 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Write your ratings in `step_by_step_reasoning`. Then, write your skill choice in `proposed_skill`.", + "bbox": [ + 199, + 152, + 764, + 178 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.2 Code Generation Agent Context Representation", + "text_level": 1, + "bbox": [ + 171, + 214, + 571, + 231 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.2.1 States as Accessibility Trees", + "text_level": 1, + "bbox": [ + 171, + 241, + 433, + 258 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We represent the state as a tuple containing an screenshot of the page, a title, a URL, and an accessibility tree hierarchy. These are serialized by concatenating clean string representations of each of these properties. 
We represent each node in the accessibility hierarchy as a line containing (1) indentation representing the depth in the tree, (2) the accessible role, (3) the accessible name of the element, in quotation marks, and finally (4) any other accessibility tree information (represented as a ia- attributes in the DOM.)", + "bbox": [ + 169, + 266, + 826, + 349 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "A.2.2 Prompts", + "text_level": 1, + "bbox": [ + 171, + 366, + 292, + 382 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Substrings in square brackets ([]) are omitted if not applicable.", + "bbox": [ + 171, + 388, + 629, + 407 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Code Agent Context Representation" + ], + "code_body": "URL: {relative_url} \n{ state_accessibility_tree_string } \n \n{step_by_step_reasoning} \n \n \n{action_source} \n \n[ \n{stdout} \n] \n[ \n{return_value} \n] \n[< warnings> \n{ warnings} \n] \n[ \n{ recovered_exception} \n, ...] \n[ \n{unrecoverable_exception} \n] \nURL: {relative_url}", + "guess_lang": "tcl", + "bbox": [ + 173, + 435, + 823, + 920 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 17 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "{state_accessibility_tree_string} \n \n... \n \nURL: {relative_url} \n{state_accessibility_tree_string} \n \n \n{result} \n", + "guess_lang": "txt", + "bbox": [ + 196, + 125, + 419, + 303 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.3 Code Generation Agent Error Detection", + "text_level": 1, + "bbox": [ + 171, + 339, + 511, + 356 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "This is the prompt template used if we detect any errors in the agent's code. 
We use the following error messages:", + "bbox": [ + 169, + 364, + 823, + 396 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. A function named act not provided: Function name must be 'act'", + "2. The function act does not contain a single argument, page: Function must take exactly one argument: 'page'.", + "3. The function function is disabled: Function 'fnname' is disabled. Please use another function.", + "4. Incorrect or buggy Playwright functions: Please use the 'page.get_by...().f()' functions instead of the 'page.f(selector)' functions. where $f \\in \\{click, fill, type\\}$ .", + "5. Use of CSS selectors instead of accessibility tree selectors: Please use Accessibility Tree-centric selectors, like 'page.get_by_role()', '.nth()', instead of the CSS-style selectors like '.locator' or '.query_selector()'.", + "6. Blank response: You did not provide any Python code, but you also did not provide a result for 'terminate_with_result'. Please provide one or the other.", + "7. Type errors: Type Error: {error}" + ], + "bbox": [ + 207, + 405, + 823, + 656 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A.4 Action Synthesis", + "text_level": 1, + "bbox": [ + 171, + 670, + 346, + 686 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Code Agent Action Generation", + "text_level": 1, + "bbox": [ + 197, + 702, + 426, + 718 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "You generate Playwright code to interact with websites. 
Words of wisdom:", + "bbox": [ + 197, + 729, + 684, + 744 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- If you want to click a generic button (e.g., that belongs to an element), use the full .get_by-role() path to the element (e.g., .get_by-role(\"group\", name=\"Test Item\").get_by-role(\"button\", name=\"Go\") instead of .get_by-role(\"button\", name=\"Go\"), as this is ambiguous).", + "- Write exceptionally correct Python code.", + "- You love to take advantage of functions in the knowledge_base whenever possible. You use them via Python function calls. It is required to use the knowledge base function corresponding to an action if it exists.", + "- Use relative goto when you can. \nare currently on a webpage titled {title_repr}, with the URL {url_repr}. \nder the following Accessibility Tree. The accessibility tree is formatted like this:" + ], + "bbox": [ + 240, + 748, + 800, + 909 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 18 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[role] [\"name\"]\n[properties] {{{child1]}}; \n[role] [\"name\"]\n[properties]; // no children \nHere is an example: \n \narticle \"Willy Wonka's Chocolate Factory Opens Its Gates\" {{{link \"Share\";}}} \n}; \narticle \"Hogwarts Begins Accepting Applicants for Class of 2029\" {{{link \"Share\";}}} \n \nTo select the first link, you can do (because name = case-insensitive substring match unless exact=True is supplied): \nfirst_link = page.get_by_role(\"article\", name=\"Willy Wonka\").get_by_role(\"link\", name=\"Share\") \nHere is another example: \n \narticle {{{header \"How can I install CUDA on Ubuntu 22?\"; link \"Share\";}}} \n}; \narticle {{{header \"How do I install Docker?\"; link \"Share\";}}} \n \nTo select the first link, you can do: \npage.get_byRole(\"article\").filter (has=page.get_byRole(\"header\", 
\"How can I install CUDA\").get_byRole(\"link\", name=\"Share\") \nHere, the .filter() is necessary to ensure that we select the correct article. This is especially important in cases where the parent element doesn't include relevant criteria for identifying a child element. Tips about this tree: \n· If you see a node as a child of an iframe, you must use page.frame(name=...), and then access the node (via .get_by ROLE() or similar). \n· Some elements will require interaction via .select_option(). They will be labeled as such. This is because they are HTML aka get_by_placeholder(\"Search GitLab\")", + "2) > aka locator([[id=\\\"__BVID__158\\\"]])" + ], + "bbox": [ + 225, + 724, + 797, + 816 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "Call log:", + "bbox": [ + 197, + 825, + 259, + 839 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 839, + 295, + 852 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "", + "bbox": [ + 199, + 890, + 266, + 901 + ], + "page_idx": 31 + }, + { + "type": "page_number", + "text": "32", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 31 + }, + { + "type": "text", + "text": "URL: /byte blaze/accessible- html-content-patterns/-/branches", + "bbox": [ + 196, + 111, + 627, + 128 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "[Omitted] ", + "bbox": [ + 197, + 138, + 269, + 162 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 176, + 287, + 189 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The page currently displays a dialog that asks for confirmation to delete all merged branches. It requires typing 'delete' into an input box. After typing the confirmation text, the \"Delete merged branches\" button can be clicked to proceed. 
I will fill the textbox with 'delete' and then click the confirmation button to complete the task.", + "bbox": [ + 197, + 189, + 784, + 252 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "", + "bbox": [ + 199, + 253, + 295, + 265 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "", + "bbox": [ + 199, + 277, + 254, + 289 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "async def act(page): # Fill the confirmation textbox with 'delete' await page.get_by_role(\"dialog\", name=\"Delete all merged branches?\").get_by-role(\"textbox\").fill(\"delete\") # Click the \"Delete merged branches\" button to confirm deletion await page.get_by-role(\"dialog\", name=\"Delete all merged branches?\").get_by-role(\"button\", name=\"Delete merged branches\").click()", + "guess_lang": "python", + "bbox": [ + 212, + 309, + 792, + 436 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "", + "guess_lang": "txt", + "bbox": [ + 197, + 459, + 264, + 493 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 511, + 264, + 521 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "URL: /byte blaze/accessible- html-content-patterns/-/branches", + "bbox": [ + 199, + 522, + 625, + 536 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[Omitted] ", + "guess_lang": "txt", + "bbox": [ + 199, + 547, + 269, + 571 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "", + "bbox": [ + 197, + 585, + 362, + 597 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "The merged branches have been successfully deleted in the 'byteblaze/accessible-xml-content-patterns' project.", + "bbox": [ + 199, + 598, + 772, + 625 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "", + "guess_lang": "erb", + "bbox": [ + 199, + 
625, + 372, + 656 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "", + "bbox": [ + 199, + 674, + 264, + 685 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "URL: /byte blaze/accessible- html-content-patterns/~/branches", + "bbox": [ + 199, + 686, + 625, + 700 + ], + "page_idx": 32 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "[Omitted] ", + "guess_lang": "txt", + "bbox": [ + 199, + 710, + 269, + 736 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "Here is the fix the agent made: removing the ambiguous DOM selector for \" textbox\", and replacing it with one that first selects the dialog window that contains the textbox, and then the textbox element within it.", + "bbox": [ + 169, + 880, + 823, + 924 + ], + "page_idx": 32 + }, + { + "type": "page_number", + "text": "33", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 32 + }, + { + "type": "text", + "text": "delete_merged_branches", + "text_level": 1, + "bbox": [ + 197, + 104, + 377, + 119 + ], + "page_idx": 33 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "async def deletemergedbranches(page, project_path):\n return (0, 1)\n [0, 1] = 0\n [0, 1] = 1\n [0, 1] = 2\n [0, 1] = 3\n [0, 1] = 4\n [0, 1] = 5\n [0, 1] = 6\n [0, 1] = 7\n [0, 1] = 8\n [0, 1] = 9\n [0, 1] = 10\n [0, 1] = 11\n [0, 1] = 12\n [0, 1] = 13\n [0, 1] = 14\n [0, 1] = 15\n [0, 1] = 16\n [0, 1] = 17\n [0, 1] = 18\n [0, 1] = 19\n [0, 1] = 20\n [0, 1] = 21\n [0, 1] = 22\n [0, 1] = 23\n [0, 1] = 24\n [0, 1] = 25\n [0, 1] = 26\n [0, 1] = 27\n [0, 1] = 28\n [0, 1] = 29\n [0, 1] = 30\n [0, 1] = 31\n [0, 1] = 32\n [0, 1] = 33\n [0, 1] = 34\n [0, 1] = 35\n [0, 1] = 36\n [0, 1] = 37\n [0, 1] = 38\n [0, 1] = 39\n [0, 1] = 40\n [0, 1] = 41\n [0, 1] = 42\n [0, 1] = 43\n [0, 1] = 44\n [0, 1] = 45\n [0, 1] = 46\n [0, 1] = 47\n [0, 1] = 48\n [0, 1] = 49\n [0, 1] = 50\n [0, 1] = 51\n [0, 1] = 52\n [0, 1] = 53\n [0, 1] = 54\n [0, 1] = 55\n [0, 1] = 
56\n [0, 1] = 57\n [0, 1] = 58\n [0, 1] = 59\n [0, 1] = 60\n [0, 1] = 61\n [0, 1] = 62\n [0, 1] = 63\n [0, 1] = 64\n [0, 1] = 65\n [0, 1] = 66\n [0, 1] = 67\n [0, 1] = 68\n [0, 1] = 69\n [0, 1] = 70\n [0, 1] = 71\n [0, 1] = 72\n [0, 1] =", + "guess_lang": "txt", + "bbox": [ + 194, + 141, + 799, + 614 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "D.2 Failure Cases and Limitations", + "text_level": 1, + "bbox": [ + 171, + 669, + 439, + 684 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "We observe some failure cases, which seem to be caused by gaps in LLM reasoning capabilities for certain situations.", + "bbox": [ + 169, + 700, + 826, + 729 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "D.2.1 Criteria for Verifying Success", + "text_level": 1, + "bbox": [ + 171, + 760, + 441, + 777 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "Because our criteria for a function to be \"verified\" was to have it be called without producing an exception, we found that occasionally, malfunctioning APIs could be marked as verified simply because they silenced all exceptions that could have occurred. This represents a measure for evaluation having unintended consequences.", + "bbox": [ + 169, + 790, + 823, + 849 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "In the following example, a function was misused, and it was unable to resolve the necessary DOM elements given the page state. However, instead of improving the function's signature or adding a check to ensure the function was called correctly, the LLM adds \"if\" statements to simply avoid any of the atomic actions from producing an error. 
While this does reduce the number of exceptions, it does not improve the robustness of the API.", + "bbox": [ + 169, + 853, + 825, + 925 + ], + "page_idx": 33 + }, + { + "type": "page_number", + "text": "34", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 33 + }, + { + "type": "text", + "text": "update_productquantity", + "text_level": 1, + "bbox": [ + 197, + 104, + 385, + 119 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "async def update_product_quantity(page, product_name, new_quantity):", + "bbox": [ + 212, + 143, + 712, + 157 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "1 1", + "bbox": [ + 243, + 157, + 266, + 162 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Update the quantity of a specified product in the Magento Admin Panel's Advanced Inventory settings.", + "bbox": [ + 241, + 170, + 766, + 195 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "This function navigates to the product's edit page, opens the Advanced Inventory dialog,", + "bbox": [ + 241, + 207, + 759, + 232 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "and updates the quantity to the specified value. 
It includes proactive checks for element availability.", + "bbox": [ + 241, + 233, + 759, + 258 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Initial UI State:", + "bbox": [ + 243, + 270, + 367, + 282 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- The user must be logged into the Magento Admin Panel.", + "bbox": [ + 243, + 284, + 645, + 295 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- The function assumes the product edit page is accessible via the '/admin/catalog/product/edit/name/{product_name}' URL.", + "bbox": [ + 243, + 296, + 795, + 321 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Args:", + "bbox": [ + 243, + 335, + 279, + 345 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "page (Page): The playwright page object representing the browser page.", + "bbox": [ + 241, + 347, + 756, + 359 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "product_name(str): The name of the product to update.", + "bbox": [ + 243, + 359, + 638, + 371 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "newquantity(int):The new quantity to set for the product.", + "bbox": [ + 243, + 372, + 683, + 383 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Usage Log:", + "bbox": [ + 243, + 397, + 316, + 409 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Successfully updated the quantity for 'Arcadio Gym Short' to 50.", + "bbox": [ + 243, + 410, + 727, + 421 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Adjusted the quantity for 'Yoga Mat' to 100 without issues.", + "bbox": [ + 243, + 422, + 689, + 433 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "- Encountered a timeout error when trying to update 'Sprite Yoga Mat'.", + "bbox": [ + 243, + 435, + 756, + 446 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "Consider checking the availability of the 'Advanced Inventory' button.", + "bbox": [ + 243, + 446, + 759, + 459 + ], + "page_idx": 34 + 
}, + { + "type": "text", + "text": "1 1", + "bbox": [ + 243, + 459, + 266, + 465 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "import re", + "bbox": [ + 243, + 473, + 310, + 484 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "import asyncio", + "bbox": [ + 243, + 486, + 348, + 497 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "await page.goto(f\"/admin/catalog/product/edit/name/{product_name}/\"", + "bbox": [ + 243, + 508, + 741, + 522 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "advanced_inventory_button = page.get_by-role(\"button\", name=\"Advanced Inventory\")", + "bbox": [ + 243, + 523, + 751, + 546 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "if await advanced_inventory_button.count() > 0:", + "bbox": [ + 243, + 547, + 588, + 559 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "await advanced_inventory_button.click()", + "bbox": [ + 243, + 561, + 558, + 573 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "else:", + "bbox": [ + 243, + 574, + 279, + 583 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "print(\"Advanced Inventory button not found.\")", + "bbox": [ + 243, + 585, + 602, + 597 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "return", + "bbox": [ + 272, + 599, + 318, + 609 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "qty_input = page.get_by-role(\"textbox\", name=\"[GLOBAL] Qty\")", + "bbox": [ + 243, + 611, + 684, + 623 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "if await qty_input.count() > 0:", + "bbox": [ + 243, + 625, + 470, + 635 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "await qty_input fills(str(new_quantity))", + "bbox": [ + 272, + 637, + 558, + 648 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "else:", + "bbox": [ + 243, + 648, + 279, + 659 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "print(\"Quantity input not found.\")", + "bbox": [ + 243, + 661, + 633, 
+ 672 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "return", + "bbox": [ + 272, + 675, + 318, + 684 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "done_button = page.get_by-role(\"button\", name=\"Done\")", + "bbox": [ + 243, + 686, + 633, + 698 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "if await done_button.count() > 0:", + "bbox": [ + 243, + 699, + 485, + 710 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "await done_button.click()", + "bbox": [ + 272, + 712, + 455, + 723 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "else:", + "bbox": [ + 243, + 724, + 279, + 734 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "print(\"Done button not found.\")", + "bbox": [ + 243, + 736, + 501, + 748 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "return", + "bbox": [ + 272, + 750, + 318, + 758 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "save_button = page.get_by-role(\"button\", name=\"Save\")", + "bbox": [ + 243, + 762, + 633, + 773 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "if await save_button.count() > 0:", + "bbox": [ + 243, + 773, + 485, + 785 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "await save_button.click()", + "bbox": [ + 272, + 787, + 455, + 799 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "else:", + "bbox": [ + 243, + 800, + 305, + 810 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "print(\"Save button not found.\")", + "bbox": [ + 272, + 811, + 501, + 823 + ], + "page_idx": 34 + }, + { + "type": "text", + "text": "return", + "bbox": [ + 272, + 825, + 318, + 835 + ], + "page_idx": 34 + }, + { + "type": "page_number", + "text": "35", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 34 + }, + { + "type": "image", + "img_path": "images/323149586afd80fbae2733b648b2e5eb7a1c197d854cc5e6542a41dec3475c4b.jpg", + "image_caption": [ + "Figure 6: The agent is unable to successfully verify that the 
\"add collaborator to project\" works because it does not have access to a list of emails that can appropriately be used as test cases." + ], + "image_footnote": [], + "bbox": [ + 189, + 101, + 803, + 369 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "D.2.2 Creating Functional Test Cases", + "text_level": 1, + "bbox": [ + 171, + 452, + 455, + 468 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "Some skills require interacting with data stored on the website, and their success is conditioned not on whether the UI is accurately interacted with but rather whether the function inputs are realistic. In the following example, the agent attempts to learn how to add collaborators to a Gitlab repository, but is unable to because it does not have a source of real test input data. Future work could explore automatically generating test cases for agents for successful evaluation in dynamic websites.", + "bbox": [ + 169, + 479, + 826, + 564 + ], + "page_idx": 35 + }, + { + "type": "text", + "text": "D.2.3 Ceiling of Agent Capabilities", + "text_level": 1, + "bbox": [ + 171, + 583, + 441, + 599 + ], + "page_idx": 35 + }, + { + "type": "image", + "img_path": "images/4d17a0e8318ad94249d4c8c424867fc0ef14b19c6bc9d7de9e422335dbf683c9.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 612, + 823, + 922 + ], + "page_idx": 35 + }, + { + "type": "page_number", + "text": "36", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 35 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "- Successfully navigated to the 'Cars for Sale' section, applied filters, and requested quotes for multiple listings. - Encountered issues with automatic navigation using 'Next car' link, resolved by manual URL navigation. 
import asyncio await page.goto(\"https://www.cars.com/shopping/\") await page.wait_for_load_state(\"networkidle\") await close_overlays(page) await page.get_by_role(\"combobox\", name=\"Make\").select_option(make) await page.get_by-role(\"combobox\", name=\"Model\").select_option(model) show_MATCHes_button = page.get_byRole(\"button\", name=\"Show matches\") if await show_MATCHes_button.count() > 0: await show_MATCHes_button.click() else: print(\"Show matches button not found.\") return await page.wait_for_load_state(\"networkidle\") for _ in range(5): await page.get_byrole(\"textbox\",name $=$ \"First name\").fill(first_name) await page.get_byrole(\"textbox\",name $=$ \"Last name\").fill(last_name) await page.get_byrole(\"textbox\",name $=$ \"Email\").fill(email) await page.get_byrole(\"combobox\",name $=$ \"Subject\").select_option( \"Get a price quote\" ) submit_button = page.get_byrole(\"button\", name=\"Email\") if await submit_button.count() > 0: await submit_button.click() else: print(\"Submit button not found.\") return await asyncio.sleep(2) next_car_link = page.get_byrole(\"link\", name $=$ \"Next car\") if await next_car_link.count() > 0: await next_car_link.click() await page.wait_for_load_state(\"networkidle\") else: print(\"Next car link not found. Navigation stopped.\") break", + "guess_lang": "python", + "bbox": [ + 173, + 101, + 823, + 645 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "D.2.4 Fail to call API", + "text_level": 1, + "bbox": [ + 171, + 659, + 344, + 674 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "The agent does not call available APIs even when they are generated during exploration. As shown in Figure 7, for the task \"Save a hamburger recipe,\" the agent should first call search Recipes_by_cuisine_type('hamburger') to obtain recipe details and then select the print option to save the recipes. 
However, the agent fails to call this API during the task execution, indicating a gap between exploration and execution phases, suggesting the need for improving the agent's policy to better utilize generated APIs.", + "bbox": [ + 169, + 684, + 826, + 772 + ], + "page_idx": 36 + }, + { + "type": "text", + "text": "search_recipes_by_cuisine_type", + "text_level": 1, + "bbox": [ + 197, + 779, + 439, + 794 + ], + "page_idx": 36 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "async def searchrecipes_by_cuisine_type(page, cuisine_type):\n \"\" Searches for recipes based on a specified cuisine type on Cookpad. This function automates the process of searching for recipes by entering a cuisine type into the search functionality on the Cookpad homepage.", + "guess_lang": "python", + "bbox": [ + 210, + 816, + 790, + 909 + ], + "page_idx": 36 + }, + { + "type": "page_number", + "text": "37", + "bbox": [ + 488, + 946, + 509, + 960 + ], + "page_idx": 36 + }, + { + "type": "image", + "img_path": "images/4e011efbefed4e7c4d0b6eee527d126e6082b24752a6e871c38288561817565e.jpg", + "image_caption": [ + "Figure 7: Screenshot of cookpad.com. The agent fails to call search Recipes_by_cuisine_type('hamburger') to obtain recipe details." + ], + "image_footnote": [], + "bbox": [ + 271, + 101, + 723, + 297 + ], + "page_idx": 37 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "Args: page: The Playwright page object to interact with. cuisine_type (str): The cuisine type to search for, e.g., 'Italian', 'Mexican', 'Korean', 'Chinese', 'American'. Usage log: - Successfully used to search for 'Italian' cuisine recipes, displaying the correct results. import re await page.get_by_role(\"textbox\", name=\"Search by recipe or ingredients\"). fill( cuisine_type ) search_button = page.get_byrole(\"button\", name $\\equiv$ re.compile(\"Search\",re. 
IgnORECASE)) if await search_button.is Visible(): await search_button.click() else: await pagekeyboard.press(\"Enter\")", + "guess_lang": "python", + "bbox": [ + 173, + 367, + 823, + 698 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "D.2.5 Wrong Parameter", + "text_level": 1, + "bbox": [ + 171, + 710, + 359, + 727 + ], + "page_idx": 37 + }, + { + "type": "text", + "text": "In some cases, the agent correctly identifies the appropriate API but selects incorrect parameters. For example in Figure 8, in the task \"Browse recipes for gluten-free chocolate chip cookies that can be made without nuts,\" the agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts'), whereas the correct parameter should be search Recipes_by_Ingredients(page, 'chocolate chip without nuts'). This indicates that the agent needs better parameter selection logic to enhance performance.", + "bbox": [ + 169, + 734, + 826, + 833 + ], + "page_idx": 37 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "search_recipes_by Ingredients\nasync def search_recipes_by Ingredients(page, ingredients):\n \"\"", + "guess_lang": "python", + "bbox": [ + 173, + 840, + 823, + 921 + ], + "page_idx": 37 + }, + { + "type": "page_number", + "text": "38", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 37 + }, + { + "type": "image", + "img_path": "images/539761f5c5718c889d442e4ae9ddab9d0c240e025524852374883354e97c7718.jpg", + "image_caption": [ + "Figure 8: Screenshot of cookpad.com. 
The agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts') instead of the correct parameter search Recipes_by Ingredients(page, 'chocolate chip without nuts')" + ], + "image_footnote": [], + "bbox": [ + 189, + 101, + 803, + 369 + ], + "page_idx": 38 + }, + { + "type": "image", + "img_path": "images/d5dcc4475ce353b83253ee873efa9f0b7597e17fb11fdb9b5d22ade9898c4263.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 506, + 823, + 906 + ], + "page_idx": 38 + }, + { + "type": "page_number", + "text": "39", + "bbox": [ + 488, + 946, + 508, + 959 + ], + "page_idx": 38 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "async def apply_multiple_filters(page, make, model, distance, zip_code, max_price): # Apply multiple filters on the Cars.com website including make, model, distance, ZIP code, and price range. await page.goto(\"https://www.cars.com/shopping/\") await close_overlays(page) await filterCars_by.make_and_model(page, make, model) await refine_search_by_distance_andzip(page, distance $\\equiv$ distance,zip_code $\\equiv$ zip_code) if max_price and max_price.strip(): price_select $=$ page.locator(#make-model-max-price') if await price_select.count() $>0$ : await price_select.select_option(value $\\equiv$ str(max_price)) else: print(\"Price selector not found\") search_button $=$ page.get_byRole(\"tabpanel\",name $\\equiv$ \"Make\").locator(\"spark-button[datasearchtype $\\equiv$ 'make']\") if await search_button.count() $>0$ : await search_button.click() else: print(\"Search button not found in make panel\")", + "guess_lang": "python", + "bbox": [ + 181, + 108, + 803, + 383 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Figure 9: An example of compositional API that calls multiple other APIs.", + "bbox": [ + 225, + 402, + 764, + 420 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "D.3 Compositional API", + "text_level": 1, + "bbox": [ + 171, + 
443, + 364, + 459 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "D.4 Website Roadmap API", + "text_level": 1, + "bbox": [ + 171, + 469, + 388, + 486 + ], + "page_idx": 39 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [], + "code_body": "async def navigate_toCars_for_sale(page): # Navigate to the 'Cars for Sale' section on the Cars.com website, which contains car sales information. import asyncio await page.goto(\"https://www.cars.com/\" cars_for_sale_link = page.get_by-role(\"navigation\", name=\"main menu\").get_by-role( \"link\", name=\"Cars for Sale\") if await cars_for_sale_link.count() > 0: await cars_for_sale_link.click() await asyncio.sleep(5) else: print(\"Cars for Sale link is not visible.\")", + "guess_lang": "python", + "bbox": [ + 181, + 510, + 772, + 696 + ], + "page_idx": 39 + }, + { + "type": "text", + "text": "Figure 10: An example of roadmap information contained in the synthesized APIs. The docstring of this API contains the corresponding functionality of this API. 
If the agent is new to this website, this function might require some exploration to get.", + "bbox": [ + 169, + 717, + 826, + 762 + ], + "page_idx": 39 + }, + { + "type": "page_number", + "text": "40", + "bbox": [ + 488, + 946, + 509, + 959 + ], + "page_idx": 39 + } +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_model.json b/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ffd65a66beb10e27b6e00997791c6b31451b0c76 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_model.json @@ -0,0 +1,7100 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.277, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.07079v1 [cs.AI] 9 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.099, + 0.826, + 0.141 + ], + "angle": 0, + "content": "SkillWeaver: Web Agents can Self-Improve by Discovering and Honing Skills" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.166, + 0.888, + 0.214 + ], + "angle": 0, + "content": "Boyuan Zheng\\(^{1*}\\), Michael Y. 
Fatemi\\(^{2*}\\), Xiaolong Jin\\(^{3*}\\), Zora Zhiruo Wang\\(^{4}\\), Apurva Gandhi\\(^{4}\\), Yueqi Song\\(^{4}\\), Yu Gu\\(^{1}\\), Jayanth Srinivasa\\(^{5}\\), Gaowen Liu\\(^{5}\\), Graham Neubig\\(^{4}\\), Yu Su\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.219, + 0.783, + 0.238 + ], + "angle": 0, + "content": "1 The Ohio State University 2 University of Virginia 3 Purdue University" + }, + { + "type": "text", + "bbox": [ + 0.331, + 0.236, + 0.684, + 0.252 + ], + "angle": 0, + "content": "4 Carnegie Mellon University 5 Cisco Research" + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.252, + 0.621, + 0.266 + ], + "angle": 0, + "content": "{zheng.2372, su.809}@osu.edu" + }, + { + "type": "title", + "bbox": [ + 0.459, + 0.301, + 0.538, + 0.317 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.333, + 0.769, + 0.614 + ], + "angle": 0, + "content": "To survive and thrive in complex environments, humans have evolved sophisticated self-improvement mechanisms through environment exploration, hierarchical abstraction of experiences into reusable skills, and collaborative construction of an ever-growing skill repertoire. Despite recent advancements, autonomous web agents still lack crucial self-improvement capabilities, struggling with procedural knowledge abstraction, skill refinement, and skill composition. In this work, we introduce SKILLWEAKER, a skill-centric framework that enables agents to self-improve by autonomously synthesizing reusable skills as APIs. Given a new website, the agent autonomously discovers skills, executes them for practice, and distills practice experiences into robust APIs. Iterative exploration continuously expands a library of lightweight, plug-and-play APIs, significantly enhancing the agent's capabilities. 
Experiments on WebArena and real-world websites demonstrate the efficacy of SKILLWEAKER, achieving relative success rate improvements of \\(31.8\\%\\) and \\(39.8\\%\\), respectively. Additionally, APIs synthesized by strong agents substantially enhance weaker agents through transferable skills, yielding improvements of up to \\(54.3\\%\\) on WebArena. These results demonstrate the effectiveness of honing diverse website interactions into APIs, which can be seamlessly shared among various web agents.\\(^{1}\\)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.64, + 0.32, + 0.655 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.671, + 0.828, + 0.84 + ], + "angle": 0, + "content": "AI agents based on large language models (LLMs) that can browse the web (Deng et al., 2023; Zhou et al., 2024a; Zheng et al., 2024) or use computers (Xie et al., 2024) like humans are rapidly rising as a new frontier of AI research and application. Despite these promising opportunities, digital environments present substantial challenges due to their inherent complexity and diversity. Website environments are highly intricate, consisting of numerous interactive elements that create large action spaces. An even greater challenge lies in developing generalist web agents capable of generalizing to out-of-distribution task types and adapting to novel websites. Existing efforts have attempted to train web agents using large-scale trajectory datasets collected across diverse websites and task types (Li et al., 2024; Pahuja et al., 2025). However, these agents often struggle with overfitting to specific website structures and task distributions (Li et al., 2024; Zheng et al., 2024), reducing their ability to handle previously unseen environments effectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.845, + 0.828, + 0.888 + ], + "angle": 0, + "content": "As an integral feat of human intelligence, self-improvement presents a promising solution to these challenges. Imagine a user visiting 'yelp.com' for the first time. In the beginning, she may not be familiar with the layout and functionalities provided by the website, so she" + }, + { + "type": "page_footnote", + "bbox": [ + 0.19, + 0.896, + 0.816, + 0.911 + ], + "angle": 0, + "content": "*Equal Contribution. Work done during Michael and Xiaolong's internship at OSU NLP Group." + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.911, + 0.72, + 0.924 + ], + "angle": 0, + "content": "1All resources are available at https://github.com/OSU-NLP-Group/SkillWeaver." + }, + { + "type": "list", + "bbox": [ + 0.19, + 0.896, + 0.816, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.504, + 0.96 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.101, + 0.346, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.101, + 0.599, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.601, + 0.101, + 0.813, + 0.192 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.2, + 0.346, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.348, + 0.2, + 0.597, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.6, + 0.2, + 0.81, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.285, + 0.828, + 0.385 + ], + "angle": 0, + "content": "Figure 1: An overview of the SKILLWEAKER framework. 
The Skill Proposal module (Stage I) identifies novel skills to practice based on observations of the environment and available APIs in the skill library. For each proposed skill, the agent executes it to generate trajectories, which are later evaluated by the reward model. If successful, the trajectory is utilized to synthesize an API (Stage II). To ensure robustness, the synthesized API undergoes testing with automatically generated test cases and debugging within the Skill Honing module (Stage III)." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.41, + 0.828, + 0.567 + ], + "angle": 0, + "content": "would explore the website and think about what tasks can be done. As she does more tasks on the website (e.g., searching for restaurants with various filters), common routines become memorized procedural knowledge (e.g., searching for high-end Korean restaurants entails three steps: type 'Korean' in the top search bar, click the 'Search' button shaped like a magnifier, and click the $$$$$' button on the search result page). Such routines abstracted from experiences essentially become high-level actions or skills that can be effortlessly summoned and carried out instead of re-learning things on the fly; they can even be composed into more complex routines, e.g., making a reservation at a restaurant involves first finding it. Similarly, through self-improvement, agents can build conceptual maps of the website environment, accumulate procedural knowledge as skills, compose simple skills into more complex ones, and leverage these learned skills to enhance their decision-making processes." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.57, + 0.828, + 0.797 + ], + "angle": 0, + "content": "A few recent studies have tried to capture some facets of humans' self-improvement capability. 
Traditional methods typically store skills implicitly through action trajectories, primarily leveraging them as demonstrations for in-context learning (Murty et al., 2024b) or fine-tuning (Murty et al., 2024a; Su et al., 2025; Pahuja et al., 2025). Although these trajectory-based approaches can be effective, they struggle to explicitly abstract reusable procedural knowledge, resulting in heavy training demands and limited generalization to new websites and tasks. Furthermore, continuously updating models with new trajectories introduces significant concerns such as catastrophic forgetting and sensitivity to website changes. Additionally, storing and sharing extensive memory-intensive trajectory data also poses practical challenges for knowledge transfer among agents. Efforts like Agent Workflow Memory (Wang et al., 2024e) and ICAL (Sarch et al., 2024) take this a step further by generating abstract, reusable routines. However, its natural language-based routines pose challenges for formal verification and precise composition into new workflows. Both methods also focus on online learning with access to test queries or offline learning requiring high-quality annotated demonstrations. Instead, we focus on more challenging autonomous exploration where agents propose novel tasks without external supervision." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.801, + 0.829, + 0.888 + ], + "angle": 0, + "content": "To this end, we propose SKILLWEAKER, a skill-centric framework that enables web agents to autonomously self-improve by exploring website environments and synthesizing reusable, structured skills (§ 2). Our framework leverages the ability of LLMs to iteratively curate APIs based on feedback from the interactive environment (Wang et al., 2023a; 2024d). 
Specifically, SKILLWEAKER comprises a three-stage pipeline (Figure 1): (1) systematic exploration of website environments to identify potential skills, (2) practicing and converting these skills" + }, + { + "type": "page_footnote", + "bbox": [ + 0.171, + 0.897, + 0.825, + 0.926 + ], + "angle": 0, + "content": "For example, screenshot and HTML files consume approximately 0.3 GB per trajectory with an average of 7.3 actions in Mind2Web (Deng et al., 2023), making them hard to transfer on the fly." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "into robust, reusable APIs, and (3) testing and debugging to ensure API reliability during inference." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.139, + 0.828, + 0.293 + ], + "angle": 0, + "content": "Results on WebArena (Zhou et al., 2024a) demonstrate substantial performance improvements after equipping web agents with APIs synthesized by SKILLWEAVER. We observe relative gains in success rate from \\(25\\%\\) to \\(38\\%\\) with just 160 iterations of the three-stage pipeline. Further experiments with weaker agents revealed even more dramatic improvements, ranging from \\(40\\%\\) to \\(130\\%\\). These results underscore two insights: (1) synthesized APIs can substantially enhance agent performance, and (2) weaker agents particularly benefit from APIs generated by more powerful counterparts, effectively distilling advanced knowledge into plug-and-play modules. To further validate the practicality of our method, we conduct experiments on real-world websites using tasks sampled from Online-Mind2Web (Xue et al., 2025; Deng et al., 2023). SKILLWEAVER achieved a \\(39.8\\%\\) relative improvement in success rate, demonstrating its effectiveness in complex, dynamic web environments." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.317, + 0.318, + 0.334 + ], + "angle": 0, + "content": "2 SkillWeaver" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.352, + 0.828, + 0.506 + ], + "angle": 0, + "content": "Our objective is to develop a framework that enables web agents to autonomously discover skills and construct a continuously expanding skill library in the form of APIs. In our work, \"API\" refers to a Python function containing Playwright code for a browser automation, rather than a (for example) REST API that handles HTTP requests. As illustrated in Figure 1, the exploration pipeline consists of three key modules: (1) Skill Proposal: Identifying new skills (e.g., a short description such as \"Identify pill using pill identifier\") for the agent to practice, facilitating the continuous discovery of relevant functionalities; (2) Skill Synthesis: Practicing the proposed skills and converting practiced trajectories into reusable Python functions, encapsulating learned behaviors into structured APIs; (3) Skill Honing: Testing and debugging the synthesized APIs using feedback from the environment and the reward model to ensure reliability." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.527, + 0.379, + 0.543 + ], + "angle": 0, + "content": "2.1 Stage I: Skill Proposal" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.555, + 0.829, + 0.696 + ], + "angle": 0, + "content": "Website environments encompass numerous underlying functionalities that require systematic exploration. LLMs have been employed as automatic curriculum (Tajwar et al., 2025) for exploring open-ended environments, like Minecraft (Wang et al., 2023a; Du et al., 2023), TextWorld (Song et al., 2024a), and household settings (Du et al., 2023). Similarly, we leverage LLMs as an automatic curriculum for discovering website functionalities, leveraging their internet-scale knowledge and HTML interpretation capabilities. 
To facilitate the understanding of website environments, we provide LLMs with detailed webpage observations, including screenshots, website names, URLs, and accessibility trees. Accessibility trees are particularly useful for understanding less familiar websites, such as CMS in WebArena, because they provide comprehensive structural and functional information." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.701, + 0.826, + 0.801 + ], + "angle": 0, + "content": "In contrast to existing approaches (Zhou et al., 2024b; Murty et al., 2024b;a; Song et al., 2024a), we emphasize skill diversity by explicitly prompting LLMs to propose novel and reusable skills beyond the current skill repertoire. To ensure efficient exploration and high success rates in subsequent skill practice and API synthesis stages, we specifically target short-horizon, reusable skills that can be completed within a single API call. Our approach follows a curriculum progression from simple to complex skill compositions. The LLM is instructed to propose the following three types of tasks (Prompts in Appendix § A.1):" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.805, + 0.828, + 0.877 + ], + "angle": 0, + "content": "Procedural Tasks. Procedural tasks require a sequence of atomic actions to achieve higher-level process automation objectives. Each procedural task corresponds to a workflow comprising actions that can be generalized to complete similar tasks. For example, the task identifying pills based on imprint and color involves a workflow where values are entered into textboxes, followed by clicking a submit button to finalize the search operation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.882, + 0.826, + 0.926 + ], + "angle": 0, + "content": "Navigational Tasks. Navigational tasks involve systematically exploring various sections or pages within a website. 
Through these tasks, agents can construct conceptual maps that capture the functionality of various webpages within the website. Examples include" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.133 + ], + "angle": 0, + "content": "navigating to the \"customer reviews\" section on a product management website and accessing individual user profiles." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.139, + 0.825, + 0.21 + ], + "angle": 0, + "content": "Information-Seeking Tasks. Information-seeking tasks involve scraping detailed data from webpages (e.g., extracting all commits from a GitHub repository). Enumerating all available items on webpages, such as comprehensive lists of reviews or faculty members, is typically challenging and requires extensive, repetitive actions. To address this challenge, generating specialized scraping APIs enables agents to efficiently retrieve all relevant information." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.235, + 0.393, + 0.25 + ], + "angle": 0, + "content": "2.2 Stage II: Skill Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.264, + 0.827, + 0.378 + ], + "angle": 0, + "content": "This module aims at automatically generating robust and reusable APIs, consisting of the following three components: Skill Practice, Reward Model, and API Synthesis. A critical challenge in this process is ensuring the robustness of the generated APIs, as each individual module may introduce errors. We address this challenge through curriculum progression—advancing from simple to complex skill compositions—which improves accuracy since modules perform better on simpler tasks requiring fewer steps. Additionally, the subsequent Skill Honing module (Stage III) validates APIs through unit testing and debugging to ensure reliability." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.382, + 0.825, + 0.48 + ], + "angle": 0, + "content": "Skill Practice. Given a task proposed in Stage I, the agent takes actions to complete it. For procedural and navigational tasks, we leverage a base agent to generate actions to repeatedly attempt to complete tasks. The agent repeatedly attempts to complete the tasks proposed in the previous section, using the reward model to determine successful completion. For information-seeking tasks, we synthesize data extraction code to gather target information from webpages, incorporating any necessary navigation steps, such as retrieving historical orders or customer information." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.487, + 0.827, + 0.6 + ], + "angle": 0, + "content": "Reward Model. LLMs have demonstrated effectiveness as evaluators of correctness, both for answer accuracy (Zheng et al., 2023) and for determining whether action sequences successfully complete task instructions (Pan et al., 2024; Zhuge et al., 2024). We prompt an LLM to provide reward signals indicating task completion success with the following observations in the context: (1) Task description, (2) Action trajectory comprising a sequence of actions with screenshots and corresponding descriptions, (3) Environmental feedback, including code execution outcomes and observable website changes. Further details can be found in Appendix § A.5." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.605, + 0.422, + 0.926 + ], + "angle": 0, + "content": "API Synthesis. This module encapsulates the agent's actions from successful trajectories into a reusable and generalized API. This is performed by creating a string representation of each of the state-action pairs in the trajectory and prompting the language model to generate a Python implementation. 
We statically analyze the function for common generation mistakes, as described in Appendix § A.6, and if any are detected, we prompt the model to generate the function again. As illustrated in Figure 2, each API includes a Python implementation (including a function signature, docstring, and code body), where the docstring particularly contains a usage log recording previous executions of the function and a description of the prerequisite state"
 },
 {
 "type": "image",
 "bbox": [
 0.429,
 0.608,
 0.829,
 0.882
 ],
 "angle": 0,
 "content": null
 },
 {
 "type": "image_caption",
 "bbox": [
 0.43,
 0.901,
 0.828,
 0.943
 ],
 "angle": 0,
 "content": "Figure 2: An example of a synthesized API from Drugs.com used to identify pills based on their characteristics."
 },
 {
 "type": "page_number",
 "bbox": [
 0.494,
 0.949,
 0.506,
 0.96
 ],
 "angle": 0,
 "content": "4"
 }
 ],
 [
 {
 "type": "text",
 "bbox": [
 0.172,
 0.104,
 0.421,
 0.133
 ],
 "angle": 0,
 "content": "of the website for the function to execute without errors."
 },
 {
 "type": "title",
 "bbox": [
 0.172,
 0.155,
 0.383,
 0.172
 ],
 "angle": 0,
 "content": "2.3 Stage III: Skill Honing"
 },
 {
 "type": "text",
 "bbox": [
 0.171,
 0.184,
 0.828,
 0.269
 ],
 "angle": 0,
 "content": "Despite significant efforts within the above modules to ensure the robustness of the synthesized API, it is not always guaranteed. To address this limitation, we integrate a stage to test and debug synthesized APIs. For APIs requiring no extra parameters except the by default Playwright page instance, we execute the API directly as a standalone unit test. For APIs requiring additional parameters, we leverage the LLM to generate appropriate parameter values that serve as comprehensive test cases." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.294, + 0.319, + 0.312 + ], + "angle": 0, + "content": "3 Experiments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.328, + 0.828, + 0.414 + ], + "angle": 0, + "content": "We conducted experiments on both WebArena (Zhou et al., 2024a) and real-world live websites to evaluate our agent. WebArena is a widely used benchmark for web agent evaluation, providing a diverse set of websites designed to simulate real-world web interactions. Real-world websites further demonstrate the performance of our method in more diverse, dynamic, and complex website environments. We perform exploration on each website environment separately to derive the API library, as described in Section § 3.4." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.435, + 0.294, + 0.45 + ], + "angle": 0, + "content": "3.1 WebArena" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.463, + 0.828, + 0.59 + ], + "angle": 0, + "content": "WebArena is a self-hostable, sandboxed web environment designed for developing and evaluating web agents. It simulates real-world websites across five common application domains: e-commerce (Shopping), social forums (Reddit), collaborative software development (Gitlab), content management (CMS), and navigation (Map). This benchmark includes a total of 812 tasks, each with automated evaluation metrics based on functional correctness. WebArena provides a stable testing environment with reliable automatic evaluation, making it a good platform for demonstrating the efficacy of our agent and analyzing various aspects of our approach. Further details on the WebArena benchmark used in our evaluation can be found in Appendix § B." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.595, + 0.827, + 0.68 + ], + "angle": 0, + "content": "Human-Crafted APIs. The self-hosted WebArena sandbox environment grants access to its source code and administrative access for the simulated websites. 
This enables us to extract APIs that adhere to standardized protocols (e.g., REST) from the official documentation within the source code as well as from external sources (Song et al., 2024a). We further leverage these human-crafted official APIs to compare them against the APIs synthesized by SKILLWEAVER, demonstrating the quality of the generated APIs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.702, + 0.371, + 0.716 + ], + "angle": 0, + "content": "3.2 Real-World Websites" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.73, + 0.829, + 0.925 + ], + "angle": 0, + "content": "Real-world websites present greater complexity and richness than simulated environments. To demonstrate the practicality and efficacy of our approach in real-world environments, we conduct evaluations on live websites. Specifically, we use Online-Mind2Web (Xue et al., 2025; Deng et al., 2023), a benchmark specifically designed for evaluating web agents on live websites. This benchmark encompasses 300 tasks across 136 popular websites spanning diverse domains. The tasks represent realistic scenarios commonly encountered in daily web interactions, as proposed by human annotators. Considering the cost of exploration on all the websites, we only consider websites with a sufficient number of tasks that can be accessed by Playwright. We end up with 4 websites that have at least 8 tasks to run online evaluations (Zheng et al., 2024; Yoran et al., 2024; He et al., 2024) and include 57 tasks, listed in Appendix § C. To ensure the validity of evaluation, we manually evaluate the success of agent trajectories. Specifically, we verify whether the actions fulfill all requirements posed by the task and whether retrieved information matches the target for information-seeking tasks." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.104, + 0.383, + 0.12 + ], + "angle": 0, + "content": "3.3 Agent Implementation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.133, + 0.825, + 0.163 + ], + "angle": 0, + "content": "By default, we use GPT-4o3 with a temperature of 0.3. Following the default WebArena evaluation settings, we set the maximum number of steps per iteration to 10." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.169, + 0.827, + 0.254 + ], + "angle": 0, + "content": "Baseline Agent. We implement a web agent with only browsing actions based on Code-Act (Wang et al., 2024b), which leverages an LLM to generate Python code using the Playwright browser automation library to interact with web environments through simple atomic actions such as click, type, and scroll. The observation space consists of webpage screenshots, an accessibility tree, and the execution results from previous actions, such as exceptions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.259, + 0.828, + 0.372 + ], + "angle": 0, + "content": "Agent with Skills. To demonstrate the performance improvements gained from APIs, we extend the action space of the baseline agent by incorporating synthesized skills, enabling the agent to execute API calls. We leverage this agent to evaluate the performance improvements from both APIs synthesized by SKILLWEAVER and human-crafted APIs (Song et al., 2024b) from WebArena's official documentation. Given the extensive number of synthesized APIs, we introduce an API selection module that filters only relevant APIs from the API library. This module also removes APIs that do not satisfy pre-conditions. The prompt and skill serialization format can be found in Appendix § A.8." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.377, + 0.826, + 0.424 + ], + "angle": 0, + "content": "Weaker Agent. 
To evaluate whether the synthesized APIs can plug and play in weaker agents, we implement a weaker variant by replacing GPT-4o with GPT-4o-mini4 while maintaining the same agent design."
 },
 {
 "type": "title",
 "bbox": [
 0.172,
 0.442,
 0.303,
 0.459
 ],
 "angle": 0,
 "content": "3.4 Exploration"
 },
 {
 "type": "text",
 "bbox": [
 0.171,
 0.469,
 0.827,
 0.583
 ],
 "angle": 0,
 "content": "During the exploration stage, we employ the agent described above to practice skills. Each website undergoes an exploration process consisting of 160 iterations with GPT-4o, where an iteration is defined as either attempting a proposed skill or testing an existing skill. Within each iteration, the agent proposes either a procedural or navigational task at the starting webpage. It can also propose an information-seeking task at the ending webpage after completing the procedural or navigational task in the previous iteration. During exploration, the agent can access APIs synthesized in the previous exploration steps after skill selection to compose more complex APIs."
 },
 {
 "type": "title",
 "bbox": [
 0.172,
 0.604,
 0.396,
 0.623
 ],
 "angle": 0,
 "content": "4 Results and Analysis"
 },
 {
 "type": "title",
 "bbox": [
 0.172,
 0.638,
 0.373,
 0.654
 ],
 "angle": 0,
 "content": "4.1 Experimental Results"
 },
 {
 "type": "text",
 "bbox": [
 0.171,
 0.664,
 0.825,
 0.735
 ],
 "angle": 0,
 "content": "WebArena Results. Our experiments on WebArena demonstrate consistent performance improvements with the integration of synthesized APIs. As illustrated in Table 1, we observe a substantial relative improvement in success rate, \\(31.8\\%\\) on average, for the baseline agent with GPT-4o and an even larger improvement of \\(54.3\\%\\) with GPT-4o-mini across the evaluated websites." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.741, + 0.827, + 0.882 + ], + "angle": 0, + "content": "When compared to AutoEval (Pan et al., 2024), which leverages an LLM-based reward model to guide inference-time exploration, SKILLWEAKER achieves higher average success rates and exhibits better or comparable performance across all domains, with the exception of the Shopping environment. The reason behind this exception is the inherent requirements for more extensive interaction with such websites for partially observable information, like dynamic product search results and product details. In comparison with SteP (Sodhi et al., 2024), which incorporates the external memory of domain-specific human-written workflows, SKILLWEAKER achieves better performance on CMS and Map environments, showing the promising quality of agent synthesized APIs even comparing with manually crafted workflows designed by human experts." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.895, + 0.441, + 0.911 + ], + "angle": 0, + "content": "3Azure Endpoint: gpt-4o-2024-08-06" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.911, + 0.477, + 0.924 + ], + "angle": 0, + "content": "4Azure Endpoint: gpt-4o-mini-2024-07-18" + }, + { + "type": "list", + "bbox": [ + 0.191, + 0.895, + 0.477, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.505, + 0.96 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.283, + 0.108, + 0.716, + 0.277 + ], + "angle": 0, + "content": "
MethodGitlabMapShoppingCMSRedditAVG.
WebArena15.015.613.910.46.612.3
AutoEval25.027.539.620.920.826.9
*SteP32.030.037.024.059.033.0
SKILLWEAVER
GPT-4o17.827.519.818.737.722.6
+ Skills22.233.927.225.850.029.8
Δ↑25%↑23%↑38%↑38%↑33%↑32%
GPT-4o-mini6.110.311.83.318.99.2
+ Skills8.916.717.17.726.414.1
Δ↑46%↑62%↑46%↑133%↑40%↑45%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.287, + 0.825, + 0.318 + ], + "angle": 0, + "content": "Table 1: Task success rate on WebArena. The numbers in green represent the relative improvement percentage." + }, + { + "type": "table", + "bbox": [ + 0.314, + 0.332, + 0.684, + 0.399 + ], + "angle": 0, + "content": "
MethodDrugFlightCookingCarAVG.
Baseline65.011.762.511.140.2
+ Skills87.029.475.011.156.2
Δ↑34%↑151%↑20%↑0%↑40%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.199, + 0.41, + 0.798, + 0.424 + ], + "angle": 0, + "content": "Table 2: Success rate in real-world website evaluation based on Online-Mind2Web." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.45, + 0.828, + 0.55 + ], + "angle": 0, + "content": "Live Website Results. As shown in Table 2, integrating synthesized APIs leads to an average relative improvement of \\(39.8\\%\\) in success rate across four websites. For the Car website, while the final success rate remained unchanged after incorporating synthesized APIs, we observed that in four out of nine tasks, the synthesized APIs successfully guided the agent to the final state required for task completion. However, the agent still encountered failures during the final steps that demanded strong environment understanding and visual reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.569, + 0.28, + 0.585 + ], + "angle": 0, + "content": "4.2 Analysis" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.596, + 0.825, + 0.669 + ], + "angle": 0, + "content": "Generalization across Website. Generalization across diverse websites, particularly less common websites, remains a significant challenge. SKILLWEAVER presents a new angle to this challenge by integrating website-specific skills and knowledge through external memory in synthesized skill files. SKILLWEAVER is able to encapsulate skills and knowledge on website environments into code files containing accumulated skills." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.673, + 0.825, + 0.759 + ], + "angle": 0, + "content": "To demonstrate the viability of such generalization, we don't include any website specific in the agent and adapt the agent to different websites by purely integrating synthesized API libraries generated during pre-hoc website explorations. 
As shown in both Table 1 and Table 2, our agent is not only able to achieve stable performance improvements across all 5 WebArena websites but also in the 4 real-world live websites that are more complex and noisy." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.764, + 0.825, + 0.837 + ], + "angle": 0, + "content": "Generalization across Agents. A critical feature of the skill repertoire is its ability to generalize across different web agents. Once the exploration process is completed, the synthesized API library can be reused to expand and enhance the action space of other agents capable of API calling. We conduct a comparative experiment by replacing the backbone language model with GPT-4o-mini without any other modification to the agent." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.84, + 0.829, + 0.927 + ], + "angle": 0, + "content": "According to the results in Table 1, after switching the language model to GPT-4o-mini, the performance of the baseline agent is substantially weaker than other agents based on GPT-4o. However, upon integration with our synthesized APIs, the agent based on GPT-4o-mini demonstrates remarkable relative success rate improvements ranging from \\(40\\%\\) to \\(133\\%\\) across five websites. Notably, this agent even achieves a better average success rate compared to the WebArena agent based on GPT-4o, particularly in the Map, Shopping, and" + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.948, + 0.506, + 0.96 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.135 + ], + "angle": 0, + "content": "Reddit websites. These results highlight the transferability of our synthesized APIs as a plug-and-play module to enhance agents." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.139, + 0.827, + 0.281 + ], + "angle": 0, + "content": "As the base agents become stronger, particularly those capable of more sophisticated inference-time search (e.g., Operator), the potential of SKILLWEAVER can be further unleashed. While current agents can accomplish challenging tasks through trial-and-error (Song et al., 2024a) and search over webpages Koh et al. (2024), certain complex skills remain beyond their capabilities. For example, as illustrated in Appendix D.2.3, the task request quote_for_multiple listings requires repeated, sequential, successful searches for specific vehicles, followed by separate quote requests. This process demands both long-range planning and back-tracing capabilities, which present significant challenges for existing agents. As agents grow stronger, SKILLWEAVER will enable the synthesis of increasingly sophisticated and complex skills." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.285, + 0.828, + 0.343 + ], + "angle": 0, + "content": "Comparison with Human-Crafted APIs. We further conducted a comparison of the performance of agent with synthesized skills with the human-crafted APIs extracted from the official documentation and outsourcing websites (Song et al., 2024a). Based on the level of API support, websites are classified into three categories: low, medium, and high." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.348, + 0.399, + 0.6 + ], + "angle": 0, + "content": "As shown in Figure 3, the performance of the synthesized APIs is comparable with human-crafted APIs on websites with low API support, such as Reddit, and those with medium API support, like Shopping. This result suggests that our exploration process is capable of generating APIs with quality that is comparable to or even superior to manually crafted APIs found in official documentation. 
On websites with high API support, such as GitLab and Maps, the performance of the synthesized APIs is not as good."
 },
 {
 "type": "image",
 "bbox": [
 0.408,
 0.353,
 0.825,
 0.564
 ],
 "angle": 0,
 "content": null
 },
 {
 "type": "image_caption",
 "bbox": [
 0.405,
 0.567,
 0.825,
 0.595
 ],
 "angle": 0,
 "content": "Figure 3: Success rate with synthesized vs. human-crafted APIs."
 },
 {
 "type": "title",
 "bbox": [
 0.172,
 0.624,
 0.31,
 0.637
 ],
 "angle": 0,
 "content": "4.3 Case Studies"
 },
 {
 "type": "text",
 "bbox": [
 0.171,
 0.653,
 0.827,
 0.752
 ],
 "angle": 0,
 "content": "Emergence of Compositional APIs. After a certain number of exploration iterations, we observe that the pipeline begins to generate compositional APIs that call multiple simpler APIs. An example is shown in § D.3. This API is responsible for applying multiple filters to search results that involves sequentially calling another API to close a webpage overlay, followed by two additional APIs to refine the search. This emergent behavior indicates an increasing level of abstraction and efficiency in SKILLWEAVER's ability to synthesize and integrate discovered skills."
 },
 {
 "type": "text",
 "bbox": [
 0.171,
 0.757,
 0.827,
 0.926
 ],
 "angle": 0,
 "content": "Limitation in Skill Use. LLMs like GPT-4o are still not robust enough at API calling, and that hurts API-augmented agents like ours, even with human-crafted APIs. This challenge is even greater for weaker LLMs such as GPT-4o-mini, as illustrated in Table 1. We identify two primary categories of failures: (1) failure to identify the appropriate API and (2) generating wrong parameters. For example, SKILLWEAVER synthesized APIs to augment agents to search recipes in Cookpad.com. As demonstrated in Appendix D.2.4, the LLM is not able to identify the search_recipes_by_cuisine_type('hamburger') API to finish the task \"Save a hamburger recipe\". 
In Appendix D.2.5, the LLM successfully identifies the right API search_recipes_by_ingredients(page, 'ingredients') to complete the task \"Browse recipes for gluten-free chocolate chip cookies that can be made without nuts\". However, it generates the wrong keyword 'chocolate chip, -nuts' instead of 'chocolate chip without nuts', leading to empty search results."
 },
 {
 "type": "page_number",
 "bbox": [
 0.494,
 0.949,
 0.505,
 0.96
 ],
 "angle": 0,
 "content": "8"
 }
 ],
 [
 {
 "type": "title",
 "bbox": [
 0.173,
 0.103,
 0.33,
 0.119
 ],
 "angle": 0,
 "content": "5 Related Work"
 },
 {
 "type": "text",
 "bbox": [
 0.175,
 0.134,
 0.83,
 0.358
 ],
 "angle": 0,
 "content": "Web Agents. Automated web agents have recently emerged as a critical research direction with the vision of automating workflows in the internet. Substantial progress in web agents has been achieved by leveraging powerful LLMs alongside diverse prompting strategies to automate the prediction of web page actions (Zheng et al., 2024; Furuta et al., 2024; Zhou et al., 2024a; Yao et al., 2022). Given inputs like HTML content, accessibility trees, and screenshots, LLM-based approaches use in-context examples, skill usage, and the ReAct framework (Yao et al., 2023) to improve action prediction. These models can be further enhanced with action-coordinate pair data for better grounding (Cheng et al., 2024; Gou et al., 2024; You et al., 2024), learning from multiple web agent trajectories to improve multi-step reasoning and planning (Lai et al., 2024; Shaw et al., 2023; Deng et al., 2023), or training the agent to directly output pixel coordinates (Hong et al., 2024). Another line of work uses planning. Tree search agent (Koh et al., 2024) performs a best-first tree search using an LLM as a heuristic, and AgentQ (Putta et al., 2024) and WebPilot (Zhang et al., 2024) employ Monte Carlo Tree Search. 
The World-model-augmented web agent (Chae et al., 2024) and WebDreamer (Gu et al., 2024b) integrate LLM-based world models to anticipate the outcomes of its actions." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.363, + 0.828, + 0.492 + ], + "angle": 0, + "content": "Tool Use and Synthesis. With the great potentials exhibited by tool-augmented LMs (Schick et al., 2023; Wang et al., 2024c), many works explore using LLMs to make tools across math reasoning (Cai et al., 2024; Qian et al., 2023; Yuan et al., 2024a), theorem proving (Wang et al., 2024a), structured data analysis (Lu et al., 2023; Wang et al., 2024d), and digital agent automation (Wang et al., 2023a; Gu et al., 2024a; Song et al., 2024b). However, existing methods rely on a set of existing training examples to determine tool correctness (Yuan et al., 2024a), or user input queries to bootstrap the tool-making system (Wang et al., 2024d). Our work does not necessitate annotated data and enables agents to gather experiences by self-exploring the environment while synthesizing more skills." + }, + { + "type": "text", + "bbox": [ + 0.175, + 0.495, + 0.83, + 0.777 + ], + "angle": 0, + "content": "Self-Improvement. Many works explore collecting trajectories through experience, rating the success with a reward model, and using the results for policy training (Patel et al., 2024; Huang et al., 2023; Madaan et al., 2023; Wang et al., 2023b; Singh et al., 2024; Chen et al., 2024; Yuan et al., 2024b). These approaches can be broadly categorized into parametric and non-parametric approaches. Parametric training with exploration trajectories has been investigated using various strategies, such as supervised fine-tuning (e.g., WebGUM (Furuta et al., 2024), Patel et al. (2024), and ScribeAgent (Shen et al., 2024)) and reinforcement learning-style optimization (e.g., ETO (Song et al., 2024a) and PAE (Zhou et al., 2024b)). 
Non-parametric methods typically transform exploration trajectories into reusable APIs or workflows that can be composed into more complex skills, like Voyager (Wang et al., 2023a), an LLM-based lifelong learning agent that continually explores the Minecraft environment. Friday (Wu et al., 2024) also investigates self-improvement on operating systems through exploration with pre-defined learning objectives. For web agents, the collected trajectories can be used as examples for in-context learning (Murty et al., 2024b), provide targets for fine-tuning (Furuta et al., 2024; Patel et al., 2024; Song et al., 2024a), and be distilled into text instructions to reproduce the desired behavior (workflows) (Wang et al., 2024e). Our work focuses on enabling web agents to continuously expand their skill repertoire by autonomously exploring website environments and distilling successful trajectories into reusable skills represented as Python APIs. These APIs are created to enable the agent to operate more efficiently during subsequent exploration and task execution."
 },
 {
 "type": "title",
 "bbox": [
 0.172,
 0.794,
 0.31,
 0.81
 ],
 "angle": 0,
 "content": "6 Conclusion"
 },
 {
 "type": "text",
 "bbox": [
 0.17,
 0.827,
 0.829,
 0.928
 ],
 "angle": 0,
 "content": "Web agents aim to automate browsing tasks to enhance human productivity across diverse digital environments. A key challenge lies in adapting to real-world websites characterized by high diversity and complexity. Inspired by the self-improvement mechanism of humans, we propose SKILLWEAVER, a skill-centric framework that enables web agents to autonomously self-improve through exploration and API synthesis. Experimental results demonstrate substantial performance improvements, with relative success rate gains of \\(31.8\\%\\) on WebArena benchmark and \\(39.8\\%\\) on real-world websites. 
Notably, weaker agents" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.949, + 0.506, + 0.96 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.104, + 0.825, + 0.162 + ], + "angle": 0, + "content": "equipped with skills from more powerful counterparts showed improvements of up to \\(54.3\\%\\), demonstrating effective knowledge distillation through lightweight, transferable modules. Our findings point to a promising future where web agents can learn to improve themselves without parameter tuning by evolving a collective skill base." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.182, + 0.347, + 0.202 + ], + "angle": 0, + "content": "Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.215, + 0.825, + 0.245 + ], + "angle": 0, + "content": "We would like to extend our appreciation to colleagues from the OSU NLP group and Anqi Feng for their insightful comments. This work is supported by a gift from Cisco." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.266, + 0.328, + 0.283 + ], + "angle": 0, + "content": "Ethics Statement" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.298, + 0.828, + 0.523 + ], + "angle": 0, + "content": "Generalist web agents hold the potential to automate routine web tasks, enhance user experiences, and promote web accessibility, safety concerns related to their real-world deployment are also critical. These concerns span privacy issues, such as access to users' personal profiles, and sensitive operations, such as financial transactions or application form submissions. During online evaluation, we noticed the possibility of these web agents generating harmful actions on the web, and we manually validated the safety of all the actions before execution. Exploration on live website may lead to potential safety concerns about trigger harmful actions. 
In case the agent is being used on a live website, we add safety instructions which guardrail the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users. It is critical for further research to thoroughly assess and mitigate the safety risks associated with web agents, ensuring they are safeguarded against producing and executing harmful actions. The code will also be released solely for research purposes, with the goal of making the web more accessible via language technologies under an OPEN-RAIL license. We are strongly against any potentially harmful use of the data or technology by any party." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.543, + 0.275, + 0.559 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.568, + 0.828, + 0.625 + ], + "angle": 0, + "content": "Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=qV83K9d5WB." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.636, + 0.829, + 0.694 + ], + "angle": 0, + "content": "Hyungjoo Chae, Namyoung Kim, Kai Tzu iunn Ong, Minju Gwak, Gwanwoo Song, Ji-hoon Kim, Sunghwan Kim, Dongha Lee, and Jinyoung Yeo. Web agents with world models: Learning and leveraging environment dynamics in web navigation. ArXiv, abs/2410.13232, 2024. URL https://api-semanticscholar.org/CorpusID:273404026." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.704, + 0.829, + 0.763 + ], + "angle": 0, + "content": "Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play finetuning converts weak language models to strong language models. 
In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=04cHTxW9BS." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.772, + 0.829, + 0.871 + ], + "angle": 0, + "content": "Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing GUI grounding for advanced visual GUI agents. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 9313-9332. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.ACL-LONG.505. URL https://doi.org/10.18653/v1/2024.acl-long.505." + }, + { + "type": "ref_text", + "bbox": [ + 0.172, + 0.882, + 0.829, + 0.926 + ], + "angle": 0, + "content": "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Samual Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine" + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.568, + 0.829, + 0.926 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.103, + 0.829, + 0.161 + ], + "angle": 0, + "content": "(eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitnet/5950bf290a1570ea401bf98882128160-Abstract-Datasets_and_Benchmarks.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.171, + 0.169, + 0.83, + 0.268 + ], + "angle": 0, + "content": "Yuqing Du, Olivia Watkins, Zihan Wang, Cedric Colas, Trevor Darrell, Pieter Abbeel, Abhishek Gupta, and Jacob Andreas. Guiding pretraining in reinforcement learning with large language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 8657-8677. PMLR, 2023. URL https://proceedings.mlr.press/v202/du23f.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.276, + 0.83, + 0.348 + ], + "angle": 0, + "content": "Hiroki Furuta, Kuang-Huei Lee, Ofir Nachum, Yutaka Matsuo, Aleksandra Faust, Shixiang Shane Gu, and Izzeddin Gur. Multimodal web navigation with instructionfinetuned foundation models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=efFmBWioSc." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.355, + 0.828, + 0.399 + ], + "angle": 0, + "content": "Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.407, + 0.83, + 0.504 + ], + "angle": 0, + "content": "Yu Gu, Yiheng Shu, Hao Yu, Xiao Liu, Yuxiao Dong, Jie Tang, Jayanth Srinivasa, Hugo Latapie, and Yu Su. Middleware for llms: Tools are instrumental for language agents in complex environments. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 7646-7663. 
Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.436." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.513, + 0.83, + 0.571 + ], + "angle": 0, + "content": "Yu Gu, Kai Zhang, Yuting Ning, Boyuan Zheng, Boyu Gou, Tianci Xue, Cheng Chang, Sanjari Srivastava, Yanan Xie, Peng Qi, Huan Sun, and Yu Su. Is your llm secretly a world model of the internet? model-based planning for web agents, 2024b. URL https://arxiv.org/abs/2411.06559." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.578, + 0.83, + 0.678 + ], + "angle": 0, + "content": "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 6864-6890. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.ACL-LONG.371. URL https://doi.org/10.18653/v1/2024.acl-long.371." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.685, + 0.829, + 0.743 + ], + "angle": 0, + "content": "Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14281-14290, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.751, + 0.829, + 0.836 + ], + "angle": 0, + "content": "Jiaxin Huang, Shixiang Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pp. 1051-1068. 
Association for Computational Linguistics, 2023. doi: 10.18653/V1/2023.EMNLP-MAIN.67. URL https://doi.org/10.18653/v1/2023.emnlp-main.67." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.844, + 0.829, + 0.887 + ], + "angle": 0, + "content": "Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. CoRR, abs/2407.01476, 2024. doi: 10.48550/ARXIV.2407.01476. URL https://doi.org/10.48550/arXiv.2407.01476." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, et al. Autowebglm: A large language" + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.103, + 0.83, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.104, + 0.825, + 0.134 + ], + "angle": 0, + "content": "model-based web navigating agent. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pp. 5295-5306, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.14, + 0.827, + 0.252 + ], + "angle": 0, + "content": "Wei Li, William E. Bishop, Alice Li, Christopher Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. On the effects of data scale on UI control agents. In Amir Globersons, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang (eds.), Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024, 2024. URL http://papers.nips.cc/paper_files/paper/2024/bitstream/a79f3ef3b445fd4659f44648f7ea8ffd-Abstract-Datasets_and_Benchmarks_Track.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.259, + 0.827, + 0.357 + ], + "angle": 0, + "content": "Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/black/871ed095b734818cfba48db6aab25a62-Abstract-Conference.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.365, + 0.828, + 0.49 + ], + "angle": 0, + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bit/91edff07232fb1b55a505a9e9f6c0ff3-Abstract-Conference.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.498, + 0.827, + 0.554 + ], + "angle": 0, + "content": "Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator. CoRR, abs/2410.02907, 2024a. doi: 10.48550/ARXIV.2410.02907. URL https://doi.org/10.48550/arXiv.2410.02907." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.562, + 0.827, + 0.618 + ], + "angle": 0, + "content": "Shikhar Murty, Christopher D. Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. BAGEL: bootstrapping agents by guiding exploration with language. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=VsvfSMI5bs." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.626, + 0.827, + 0.682 + ], + "angle": 0, + "content": "Vardaan Pahuja, Yadong Lu, Corby Rosset, Boyu Gou, Arindam Mitra, Spencer Whitehead, Yu Su, and Ahmed Awadallah. Explorer: Scaling exploration-driven web trajectory synthesis for multimodal web agents. 2025. URL https://api-semanticscholar.org/ CorpusID:276408442." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.69, + 0.827, + 0.733 + ], + "angle": 0, + "content": "Jiayi Pan, Yichi Zhang, Nicholas Tomlin, Yifei Zhou, Sergey Levine, and Alane Suhr. Autonomous evaluation and refinement of digital agents. CoRR, abs/2404.06474, 2024. doi: 10.48550/ARXIV.2404.06474. URL https://doi.org/10.48550/arXiv.2404.06474." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.74, + 0.827, + 0.796 + ], + "angle": 0, + "content": "Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. CoRR, abs/2405.20309, 2024. doi: 10.48550/ARXIV.2405.20309. URL https://doi.org/10.48550/arXiv.2405.20309." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.804, + 0.825, + 0.86 + ], + "angle": 0, + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent Q: advanced reasoning and learning for autonomous AI agents. CoRR, abs/2408.07199, 2024. doi: 10.48550/ARXIV.2408.07199. URL https://doi.org/10.48550/arXiv.2408.07199." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.868, + 0.825, + 0.924 + ], + "angle": 0, + "content": "Cheng Qian, Chi Han, Yi Fung, Yujia Qin, Zhiyuan Liu, and Heng Ji. CREATOR: Tool creation for disentangling abstract and concrete reasoning of large language models. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. URL https://openreview.net/forum?id=aCHq10rQiH." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.104, + 0.828, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.829, + 0.162 + ], + "angle": 0, + "content": "Gabriel Sarch, Lawrence Jang, Michael J. Tarr, William W. Cohen, Kenneth Marino, and Katerina Fragkiadaki. Ical: Continual learning of multimodal agents by transforming trajectories into actionable insights. ArXiv, abs/2406.14596, 2024. URL https://api.sementicscholar.org/CorpusID:274466462." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.169, + 0.831, + 0.283 + ], + "angle": 0, + "content": "Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/d842425e4bf79ba039352da0f658a906-Abstract-Conference.html." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.289, + 0.831, + 0.403 + ], + "angle": 0, + "content": "Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/6c52a8a4fadc9129c6e1d1745f2dfd0f-Abstract-Conference.html." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.41, + 0.828, + 0.468 + ], + "angle": 0, + "content": "Junhong Shen, Atishay Jain, Zedian Xiao, Ishan Amlekar, Mouad Hadji, Aaron Podolny, and Ameet Talwalkar. Scribeagent: Towards specialized web agents using production-scale workflow data. ArXiv, abs/2411.15004, 2024. URL https://api(semanticscholar.org/ CorpusID:274192657." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.476, + 0.83, + 0.617 + ], + "angle": 0, + "content": "Avi Singh, John D. Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J. Liu, James Harrison, Jaehoon Lee, Kelvin Xu, Aaron T. Parisi, Abhishek Kumar, Alexander A. Alemi, Alex Rizkowsky, Azade Nova, Ben Adlam, Bernd Bohnet, Gamaleldin Fathy Elsayed, Hanie Sedghi, Igor Mordatch, Isabelle Simpson, Izzeddin Gur, Jasper Snoek, Jeffrey Pennington, Jiri Hron, Kathleen Kenealy, Kevin Swersky, Kshitteej Mahajan, Laura Culp, Lechao Xiao, Maxwell L. Bileschi, Noah Constant, Roman Novak, Rosanne Liu, Tris Warkentin, Yundi Qian, Yamini Bansal, Ethan Dyer, Behnam Neyshabur, Jascha Sohl-Dickstein, and Noah Fiedel. Beyond human data: Scaling self-training for problem-solving with language models. Trans. Mach. Learn. Res., 2024, 2024. 
URL https://openreview.net/forum?id=1NAyUngGFK." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.624, + 0.826, + 0.654 + ], + "angle": 0, + "content": "Paloma Sodhi, S. R. K. Branavan, Yoav Artzi, and Ryan McDonald. Step: Stacked llm policies for web actions, 2024. URL https://arxiv.org/abs/2310.03720." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.661, + 0.829, + 0.706 + ], + "angle": 0, + "content": "Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for LLM agents. CoRR, abs/2403.02502, 2024a. doi: 10.48550/ARXIV.2403.02502. URL https://doi.org/10.48550/arXiv.2403.02502." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.713, + 0.829, + 0.757 + ], + "angle": 0, + "content": "Yueqi Song, Frank F. Xu, Shuyan Zhou, and Graham Neubig. Beyond browsing: Api-based web agents. ArXiv, abs/2410.16464, 2024b. URL https://api-semanticscholar.org/ CorpusID:273507298." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.764, + 0.83, + 0.821 + ], + "angle": 0, + "content": "Hongjin Su, Ruoxi Sun, Jinsung Yoon, Pengcheng Yin, Tao Yu, and Sercan Ö. Arik. Learn-by-interact: A data-centric framework for self-adaptive agents in realistic environments. CoRR, abs/2501.10893, 2025. doi: 10.48550/ARXIV.2501.10893. URL https://doi.org/10.48550/arXiv.2501.10893." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.829, + 0.83, + 0.887 + ], + "angle": 0, + "content": "Fahim Tajwar, Yiding Jiang, Abitha Thankaraj, Sumaita Sadia Rahman, J. Zico Kolter, Jeff Schneider, and Ruslan Salakhutdinov. Training a generally curious agent. CoRR, abs/2502.17543, 2025. doi: 10.48550/ARXIV.2502.17543. URL https://doi.org/10.48550/arXiv.2502.17543." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.895, + 0.829, + 0.927 + ], + "angle": 0, + "content": "Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi (Jim) Fan, and Anima Anandkumar. 
Voyager: An open-ended embodied agent" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.103, + 0.831, + 0.927 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.189, + 0.104, + 0.826, + 0.133 + ], + "angle": 0, + "content": "with large language models. Trans. Mach. Learn. Res., 2024, 2023a. URL https://api.sementicscholar.org/CorpusID:258887849." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.143, + 0.827, + 0.213 + ], + "angle": 0, + "content": "Haiming Wang, Huajian Xin, Chuanyang Zheng, Zhengying Liu, Qingxing Cao, Yinya Huang, Jing Xiong, Han Shi, Enze Xie, Jian Yin, Zhenguo Li, and Xiaodan Liang. LEGO: Neural theorem proving with growing libraries. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3f5PALef5B." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.224, + 0.827, + 0.281 + ], + "angle": 0, + "content": "Xingyao Wang, Yangyi Chen, Lifan Yuan, Yizhe Zhang, Yunzhu Li, Hao Peng, and Heng Ji. Executable code actions elicit better LLM agents. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=jJ9BoXAfFa." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.29, + 0.827, + 0.389 + ], + "angle": 0, + "content": "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Anna Rogers, Jordan L. Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pp. 13484-13508. Association for Computational Linguistics, 2023b. 
doi: 10.18653/V1/2023.ACL-LONG.754. URL https://doi.org/10.18653/v1/2023.acl-long.754." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.399, + 0.825, + 0.442 + ], + "angle": 0, + "content": "Zhiruo Wang, Zhoujun Cheng, Hao Zhu, Daniel Fried, and Graham Neubig. What are tools anyway? a survey from the language model perspective. In First Conference on Language Modeling, 2024c. URL https://openreview.net/forum?id=Xh1B90iBSR." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.452, + 0.825, + 0.495 + ], + "angle": 0, + "content": "Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. In *Forty-first International Conference on Machine Learning*, 2024d. URL https://openreview.net/forum?id=DCNCwaMJjI." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.505, + 0.825, + 0.534 + ], + "angle": 0, + "content": "Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024e." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.544, + 0.825, + 0.601 + ], + "angle": 0, + "content": "Zhiyong Wu, Chengcheng Han, Zichen Ding, Zhenmin Weng, Zhoumianze Liu, Shunyu Yao, Tao Yu, and Lingpeng Kong. Os-copilot: Towards generalist computer agents with self-improvement. CoRR, abs/2402.07456, 2024. doi: 10.48550/ARXIV.2402.07456. URL https://doi.org/10.48550/arXiv.2402.07456." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.611, + 0.827, + 0.696 + ], + "angle": 0, + "content": "Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, Yitao Liu, Yiheng Xu, Shuyan Zhou, Silvio Savarese, Caiming Xiong, Victor Zhong, and Tao Yu. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. CoRR, abs/2404.07972, 2024. doi: 10.48550/ARXIV.2404.07972. URL https://doi.org/10.48550/arXiv.2404.07972." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.706, + 0.827, + 0.75 + ], + "angle": 0, + "content": "Tianci Xue, Weijian Qi, Tianneng Shi, Chan Hee Song, Boyu Gou, Dawn Song, Huan Sun, and Yu Su. An illusion of progress? assessing the current state of web agents. OSU NLP Blog, Mar 2025. URL https://tinyurl.com/online-mind2web-blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.759, + 0.827, + 0.857 + ], + "angle": 0, + "content": "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/bit/82ad13ec01f9fe44c01cb91814fd7b8c-Abstract-Conference.htm1." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.867, + 0.827, + 0.925 + ], + "angle": 0, + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R. Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=WE_vluYUL-X." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.104, + 0.827, + 0.925 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.103, + 0.826, + 0.161 + ], + "angle": 0, + "content": "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Boin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? 
In Conference on Empirical Methods in Natural Language Processing, 2024. URL https://api_semanticscholar.org/CorpusID:271328691." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.169, + 0.827, + 0.268 + ], + "angle": 0, + "content": "Keen You, Haotian Zhang, Eldon Schoop, Floris Weers, Amanda Swearngin, Jeffrey Nichols, Yinfei Yang, and Zhe Gan. Ferret-ui: Grounded mobile UI understanding with multimodal llms. In Ales Leonardis, Elisa Ricci, Stefan Roth, Olga Russakovsky, Torsten Sattler, and Gül Varol (eds.), Computer Vision - ECCV 2024 - 18th European Conference, Milan, Italy, September 29-October 4, 2024, Proceedings, Part LXIV, volume 15122 of Lecture Notes in Computer Science, pp. 240-255. Springer, 2024. doi: 10.1007/978-3-031-73039-9\\_.14. URL https://doi.org/10.1007/978-3-031-73039-9_14." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.276, + 0.827, + 0.332 + ], + "angle": 0, + "content": "Lifan Yuan, Yangyi Chen, Xingyao Wang, Yi Fung, Hao Peng, and Heng Ji. CRAFT: Customizing LLMs by creating and retrieving from specialized toolsets. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=G0vdDSt9XM." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.341, + 0.827, + 0.398 + ], + "angle": 0, + "content": "Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=ØNphYCmgua." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.407, + 0.827, + 0.464 + ], + "angle": 0, + "content": "Yao Zhang, Zijian Ma, Yunpu Ma, Zhen Han, Yu Wu, and Volker Tresp. Webpilot: A versatile and autonomous multi-agent system for web task execution with strategic exploration. CoRR, abs/2408.15978, 2024. doi: 10.48550/ARXIV.2408.15978. 
URL https://doi.org/10.48550/arXiv.2408.15978." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.472, + 0.827, + 0.515 + ], + "angle": 0, + "content": "Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(isdiction) is a generalist web agent, if grounded. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=piecKJ2D1B." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.524, + 0.827, + 0.58 + ], + "angle": 0, + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.589, + 0.827, + 0.661 + ], + "angle": 0, + "content": "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.669, + 0.827, + 0.725 + ], + "angle": 0, + "content": "Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Li Erran Li. Proposer-agent-evaluator(pae): Autonomous skill discovery for foundation model internet agents. CoRR, abs/2412.13194, 2024b. doi: 10.48550/ARXIV.2412.13194. URL https://doi.org/10.48550/arXiv.2412.13194." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.734, + 0.827, + 0.79 + ], + "angle": 0, + "content": "Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, et al. 
Agent-as-a-judge: Evaluate agents with agents. arXiv preprint arXiv:2410.10934, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.103, + 0.827, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.508, + 0.96 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.104, + 0.305, + 0.119 + ], + "angle": 0, + "content": "Table of Content:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.126, + 0.395, + 0.141 + ], + "angle": 0, + "content": "- Appendix A: Prompts" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.143, + 0.478, + 0.158 + ], + "angle": 0, + "content": "- Appendix A.1: Skill Proposal" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.16, + 0.723, + 0.174 + ], + "angle": 0, + "content": "- Appendix A.2: Code Generation Agent Context Representation" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.176, + 0.664, + 0.19 + ], + "angle": 0, + "content": "- Appendix A.3: Code Generation Agent Error Detection" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.192, + 0.498, + 0.206 + ], + "angle": 0, + "content": "- Appendix A.4: Action Synthesis" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.208, + 0.506, + 0.223 + ], + "angle": 0, + "content": "- Appendix A.5: Success Checking" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.225, + 0.657, + 0.24 + ], + "angle": 0, + "content": "- Appendix A.6: Persisting Attempt to Knowledge Base" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.241, + 0.507, + 0.256 + ], + "angle": 0, + "content": "- Appendix A.7: Scraping Prompts" + }, + { + "type": "list", + "bbox": [ + 0.248, + 0.143, + 0.723, + 0.256 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.258, + 0.492, + 0.271 + ], + "angle": 0, + "content": "- Appendix B: WebArena Benchmark" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.272, + 0.516, + 0.285 + ], + "angle": 0, + "content": "- Appendix 
C: Real World Website Tasks" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.286, + 0.435, + 0.3 + ], + "angle": 0, + "content": "- Appendix D: Example APIs" + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.258, + 0.516, + 0.3 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.302, + 0.478, + 0.316 + ], + "angle": 0, + "content": "- Appendix D.1: Success Cases" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.318, + 0.475, + 0.332 + ], + "angle": 0, + "content": "- Appendix D.2: Failure Cases" + }, + { + "type": "text", + "bbox": [ + 0.248, + 0.334, + 0.508, + 0.349 + ], + "angle": 0, + "content": "- Appendix D.3: Composition API." + }, + { + "type": "list", + "bbox": [ + 0.248, + 0.302, + 0.508, + 0.349 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.103, + 0.289, + 0.121 + ], + "angle": 0, + "content": "A Prompts" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.135, + 0.326, + 0.153 + ], + "angle": 0, + "content": "A.1 Skill Proposal" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.162, + 0.828, + 0.207 + ], + "angle": 0, + "content": "In case the agent is being used on a live website, we add safety instructions which bias the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.216, + 0.4, + 0.233 + ], + "angle": 0, + "content": "Proposing Tasks to Explore" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.244, + 0.803, + 0.261 + ], + "angle": 0, + "content": "System: You propose tasks that would make good 'tools' for external users of a website. 
User:" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.264, + 0.758, + 0.306 + ], + "angle": 0, + "content": "You are a 'web agent' who is learning how to use a website. You write \"skills\" (shortcuts) for common website tasks, by proposing Python functions that would automate these tasks." + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.315, + 0.614, + 0.368 + ], + "angle": 0, + "content": "You have already proposed the following skills: \n \n{procedural knowledge} \n" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.378, + 0.756, + 0.444 + ], + "angle": 0, + "content": "You have built up the following knowledge about the website (in addition to the current screenshot): {semantic_knowledge} " + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.454, + 0.771, + 0.543 + ], + "angle": 0, + "content": "Now please come up with something new to learn how to do on this website. The website is structured according to the following accessibility tree hierarchy: {ax_tree} " + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.554, + 0.765, + 0.607 + ], + "angle": 0, + "content": "Do not interact with the Advanced Reporting tab if you are using \nMagenta. \nDo not interact with login/login/user accounts on any site. \nIf you're on OpenStreetMap, don't interact with community featur" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.617, + 0.793, + 0.733 + ], + "angle": 0, + "content": "Write a list of useful skills/shortcuts that you would want to have built into a website as Python functions. Write the name in natural language format. Do not use \\(*\\_id\\) as a parameter in your skill. Again, your goal is to generate functions that would be useful \"shortcuts\" for users of the website, so you should prioritize generating skills that compress a couple interactions into a single function call. Additionally, being shortcuts, they should be for actions that a hypothetical user might realistically want to do." 
+ }, + { + "type": "title", + "bbox": [ + 0.199, + 0.743, + 0.333, + 0.756 + ], + "angle": 0, + "content": "Then, estimate:" + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.757, + 0.773, + 0.781 + ], + "angle": 0, + "content": "(1) how useful they are (5 being difficult and frequency, 1 being trivial or uncommon)," + }, + { + "type": "text", + "bbox": [ + 0.201, + 0.782, + 0.796, + 0.82 + ], + "angle": 0, + "content": "(2) the expected number of clicking/typing actions required to complete the skill. (calculate this by writing the list of steps and counting AFTERWARDs)" + }, + { + "type": "list", + "bbox": [ + 0.201, + 0.757, + 0.796, + 0.82 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.831, + 0.798, + 0.869 + ], + "angle": 0, + "content": "Prefer to generate skills that are creating, modifying, or filtering/querying data on the website, as these tend to be more useful." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.87, + 0.683, + 0.884 + ], + "angle": 0, + "content": "Do not generate skills simply to perform single clicks." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.894, + 0.386, + 0.91 + ], + "angle": 0, + "content": "{safetyInstructions}" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.199, + 0.125, + 0.782, + 0.153 + ], + "angle": 0, + "content": "Then, calculate the sum of these ratings for each skill. Finally, select the skill with the highest rating." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.153, + 0.765, + 0.179 + ], + "angle": 0, + "content": "Write your ratings in `step_by_step_reasoning`. Then, write your skill choice in `proposed_skill`." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.215, + 0.573, + 0.232 + ], + "angle": 0, + "content": "A.2 Code Generation Agent Context Representation" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.242, + 0.434, + 0.259 + ], + "angle": 0, + "content": "A.2.1 States as Accessibility Trees" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.267, + 0.828, + 0.351 + ], + "angle": 0, + "content": "We represent the state as a tuple containing an screenshot of the page, a title, a URL, and an accessibility tree hierarchy. These are serialized by concatenating clean string representations of each of these properties. We represent each node in the accessibility hierarchy as a line containing (1) indentation representing the depth in the tree, (2) the accessible role, (3) the accessible name of the element, in quotation marks, and finally (4) any other accessibility tree information (represented as a ia- attributes in the DOM.)" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.367, + 0.293, + 0.383 + ], + "angle": 0, + "content": "A.2.2 Prompts" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.39, + 0.63, + 0.409 + ], + "angle": 0, + "content": "Substrings in square brackets ([]) are omitted if not applicable." + }, + { + "type": "code_caption", + "bbox": [ + 0.199, + 0.417, + 0.462, + 0.433 + ], + "angle": 0, + "content": "Code Agent Context Representation" + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.436, + 0.825, + 0.921 + ], + "angle": 0, + "content": "URL: {relative_url} \n{ state_accessibility_tree_string } \n \n{step_by_step_reasoning} \n \n \n{action_source} \n \n[ \n{stdout} \n] \n[ \n{return_value} \n] \n[< warnings> \n{ warnings} \n] \n[ \n{ recovered_exception} \n, ...] 
\n[ \n{unrecoverable_exception} \n] \nURL: {relative_url}" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.197, + 0.125, + 0.42, + 0.304 + ], + "angle": 0, + "content": "{state_accessibility_tree_string} \n \n... \n \nURL: {relative_url} \n{state_accessibility_tree_string} \n \n \n{result} \n" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.34, + 0.513, + 0.357 + ], + "angle": 0, + "content": "A.3 Code Generation Agent Error Detection" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.366, + 0.825, + 0.397 + ], + "angle": 0, + "content": "This is the prompt template used if we detect any errors in the agent's code. We use the following error messages:" + }, + { + "type": "text", + "bbox": [ + 0.21, + 0.406, + 0.72, + 0.42 + ], + "angle": 0, + "content": "1. A function named act not provided: Function name must be 'act'" + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.424, + 0.825, + 0.453 + ], + "angle": 0, + "content": "2. The function act does not contain a single argument, page: Function must take exactly one argument: 'page'." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.456, + 0.825, + 0.484 + ], + "angle": 0, + "content": "3. The function function is disabled: Function 'fnname' is disabled. Please use another function." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.488, + 0.825, + 0.532 + ], + "angle": 0, + "content": "4. Incorrect or buggy Playwright functions: Please use the 'page.get_by...().f()' functions instead of the 'page.f(selector)' functions. where \\( f \\in \\{click, fill, type\\} \\)." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.534, + 0.825, + 0.591 + ], + "angle": 0, + "content": "5. 
Use of CSS selectors instead of accessibility tree selectors: Please use Accessibility Tree-centric selectors, like 'page.get_by_role()', '.nth()', instead of the CSS-style selectors like '.locator' or '.query_selector()'." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.594, + 0.825, + 0.635 + ], + "angle": 0, + "content": "6. Blank response: You did not provide any Python code, but you also did not provide a result for 'terminate_with_result'. Please provide one or the other." + }, + { + "type": "text", + "bbox": [ + 0.209, + 0.64, + 0.48, + 0.657 + ], + "angle": 0, + "content": "7. Type errors: Type Error: {error}" + }, + { + "type": "list", + "bbox": [ + 0.209, + 0.406, + 0.825, + 0.657 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.671, + 0.347, + 0.687 + ], + "angle": 0, + "content": "A.4 Action Synthesis" + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.703, + 0.427, + 0.719 + ], + "angle": 0, + "content": "Code Agent Action Generation" + }, + { + "type": "text", + "bbox": [ + 0.198, + 0.731, + 0.686, + 0.746 + ], + "angle": 0, + "content": "You generate Playwright code to interact with websites. Words of wisdom:" + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.749, + 0.801, + 0.802 + ], + "angle": 0, + "content": "- If you want to click a generic button (e.g., that belongs to an element), use the full .get_by-role() path to the element (e.g., .get_by-role(\"group\", name=\"Test Item\").get_by-role(\"button\", name=\"Go\") instead of .get_by-role(\"button\", name=\"Go\"), as this is ambiguous)." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.805, + 0.518, + 0.819 + ], + "angle": 0, + "content": "- Write exceptionally correct Python code." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.822, + 0.801, + 0.862 + ], + "angle": 0, + "content": "- You love to take advantage of functions in the knowledge_base whenever possible. You use them via Python function calls. 
It is required to use the knowledge base function corresponding to an action if it exists." + }, + { + "type": "text", + "bbox": [ + 0.241, + 0.865, + 0.752, + 0.91 + ], + "angle": 0, + "content": "- Use relative goto when you can. \nare currently on a webpage titled {title_repr}, with the URL {url_repr}. \nder the following Accessibility Tree. The accessibility tree is formatted like this:" + }, + { + "type": "list", + "bbox": [ + 0.241, + 0.749, + 0.801, + 0.91 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.196, + 0.109, + 0.8, + 0.911 + ], + "angle": 0, + "content": "```python\n[role] [\"name\"]\n[properties] {{{child1]}}; \n[role] [\"name\"]\n[properties]; // no children \nHere is an example: \n \narticle \"Willy Wonka's Chocolate Factory Opens Its Gates\" {{{link \"Share\";}}} \n}; \narticle \"Hogwarts Begins Accepting Applicants for Class of 2029\" {{{link \"Share\";}}} \n \nTo select the first link, you can do (because name = case-insensitive substring match unless exact=True is supplied): \nfirst_link = page.get_by_role(\"article\", name=\"Willy Wonka\").get_by_role(\"link\", name=\"Share\") \nHere is another example: \n \narticle {{{header \"How can I install CUDA on Ubuntu 22?\"; link \"Share\";}}} \n}; \narticle {{{header \"How do I install Docker?\"; link \"Share\";}}} \n \nTo select the first link, you can do: \npage.get_byRole(\"article\").filter (has=page.get_byRole(\"header\", \"How can I install CUDA\").get_byRole(\"link\", name=\"Share\") \nHere, the .filter() is necessary to ensure that we select the correct article. This is especially important in cases where the parent element doesn't include relevant criteria for identifying a child element. 
Tips about this tree: \n· If you see a node as a child of an iframe, you must use page.frame(name=...), and then access the node (via .get_by ROLE() or similar). \n· Some elements will require interaction via .select_option(). They will be labeled as such. This is because they are HTML aka get_by_placeholder(\"Search GitLab\")" + }, + { + "type": "text", + "bbox": [ + 0.226, + 0.764, + 0.795, + 0.817 + ], + "angle": 0, + "content": "2) > aka locator([[id=\\\"__BVID__158\\\"]])" + }, + { + "type": "list", + "bbox": [ + 0.226, + 0.726, + 0.798, + 0.817 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.826, + 0.261, + 0.84 + ], + "angle": 0, + "content": "Call log:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.84, + 0.296, + 0.853 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.891, + 0.267, + 0.902 + ], + "angle": 0, + "content": "" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "32" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.197, + 0.112, + 0.628, + 0.129 + ], + "angle": 0, + "content": "URL: /byte blaze/accessible- html-content-patterns/-/branches" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.139, + 0.27, + 0.164 + ], + "angle": 0, + "content": "[Omitted] " + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.178, + 0.289, + 0.19 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.19, + 0.785, + 0.253 + ], + "angle": 0, + "content": "The page currently displays a dialog that asks for confirmation to delete all merged branches. It requires typing 'delete' into an input box. After typing the confirmation text, the \"Delete merged branches\" button can be clicked to proceed. I will fill the textbox with 'delete' and then click the confirmation button to complete the task." 
+ }, + { + "type": "text", + "bbox": [ + 0.2, + 0.254, + 0.297, + 0.266 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.278, + 0.255, + 0.29 + ], + "angle": 0, + "content": "" + }, + { + "type": "code", + "bbox": [ + 0.214, + 0.31, + 0.794, + 0.438 + ], + "angle": 0, + "content": "async def act(page): # Fill the confirmation textbox with 'delete' await page.get_by_role(\"dialog\", name=\"Delete all merged branches?\").get_by-role(\"textbox\").fill(\"delete\") # Click the \"Delete merged branches\" button to confirm deletion await page.get_by-role(\"dialog\", name=\"Delete all merged branches?\").get_by-role(\"button\", name=\"Delete merged branches\").click()" + }, + { + "type": "code", + "bbox": [ + 0.199, + 0.46, + 0.266, + 0.494 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.512, + 0.266, + 0.522 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.523, + 0.627, + 0.537 + ], + "angle": 0, + "content": "URL: /byte blaze/accessible- html-content-patterns/-/branches" + }, + { + "type": "code", + "bbox": [ + 0.2, + 0.548, + 0.27, + 0.573 + ], + "angle": 0, + "content": "[Omitted] " + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.587, + 0.364, + 0.598 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.599, + 0.774, + 0.625 + ], + "angle": 0, + "content": "The merged branches have been successfully deleted in the 'byteblaze/accessible-xml-content-patterns' project." 
+ }, + { + "type": "code", + "bbox": [ + 0.2, + 0.626, + 0.373, + 0.657 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.675, + 0.266, + 0.686 + ], + "angle": 0, + "content": "" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.687, + 0.626, + 0.701 + ], + "angle": 0, + "content": "URL: /byte blaze/accessible- html-content-patterns/~/branches" + }, + { + "type": "code", + "bbox": [ + 0.2, + 0.711, + 0.27, + 0.737 + ], + "angle": 0, + "content": "[Omitted] " + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.881, + 0.825, + 0.925 + ], + "angle": 0, + "content": "Here is the fix the agent made: removing the ambiguous DOM selector for \" textbox\", and replacing it with one that first selects the dialog window that contains the textbox, and then the textbox element within it." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "33" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.199, + 0.106, + 0.378, + 0.12 + ], + "angle": 0, + "content": "delete_merged_branches" + }, + { + "type": "code", + "bbox": [ + 0.196, + 0.142, + 0.8, + 0.615 + ], + "angle": 0, + "content": "async def deletemergedbranches(page, project_path):\n return (0, 1)\n [0, 1] = 0\n [0, 1] = 1\n [0, 1] = 2\n [0, 1] = 3\n [0, 1] = 4\n [0, 1] = 5\n [0, 1] = 6\n [0, 1] = 7\n [0, 1] = 8\n [0, 1] = 9\n [0, 1] = 10\n [0, 1] = 11\n [0, 1] = 12\n [0, 1] = 13\n [0, 1] = 14\n [0, 1] = 15\n [0, 1] = 16\n [0, 1] = 17\n [0, 1] = 18\n [0, 1] = 19\n [0, 1] = 20\n [0, 1] = 21\n [0, 1] = 22\n [0, 1] = 23\n [0, 1] = 24\n [0, 1] = 25\n [0, 1] = 26\n [0, 1] = 27\n [0, 1] = 28\n [0, 1] = 29\n [0, 1] = 30\n [0, 1] = 31\n [0, 1] = 32\n [0, 1] = 33\n [0, 1] = 34\n [0, 1] = 35\n [0, 1] = 36\n [0, 1] = 37\n [0, 1] = 38\n [0, 1] = 39\n [0, 1] = 40\n [0, 1] = 41\n [0, 1] = 42\n [0, 1] = 43\n [0, 1] = 44\n [0, 1] = 45\n [0, 1] = 46\n [0, 1] = 47\n [0, 1] = 48\n [0, 1] = 49\n [0, 1] = 50\n [0, 1] = 51\n [0, 1] = 52\n [0, 1] = 53\n 
[0, 1] = 54\n [0, 1] = 55\n [0, 1] = 56\n [0, 1] = 57\n [0, 1] = 58\n [0, 1] = 59\n [0, 1] = 60\n [0, 1] = 61\n [0, 1] = 62\n [0, 1] = 63\n [0, 1] = 64\n [0, 1] = 65\n [0, 1] = 66\n [0, 1] = 67\n [0, 1] = 68\n [0, 1] = 69\n [0, 1] = 70\n [0, 1] = 71\n [0, 1] = 72\n [0, 1] =" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.67, + 0.441, + 0.685 + ], + "angle": 0, + "content": "D.2 Failure Cases and Limitations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.702, + 0.828, + 0.731 + ], + "angle": 0, + "content": "We observe some failure cases, which seem to be caused by gaps in LLM reasoning capabilities for certain situations." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.761, + 0.442, + 0.779 + ], + "angle": 0, + "content": "D.2.1 Criteria for Verifying Success" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.791, + 0.825, + 0.85 + ], + "angle": 0, + "content": "Because our criteria for a function to be \"verified\" was to have it be called without producing an exception, we found that occasionally, malfunctioning APIs could be marked as verified simply because they silenced all exceptions that could have occurred. This represents a measure for evaluation having unintended consequences." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.854, + 0.826, + 0.926 + ], + "angle": 0, + "content": "In the following example, a function was misused, and it was unable to resolve the necessary DOM elements given the page state. However, instead of improving the function's signature or adding a check to ensure the function was called correctly, the LLM adds \"if\" statements to simply avoid any of the atomic actions from producing an error. While this does reduce the number of exceptions, it does not improve the robustness of the API." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "34" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.199, + 0.106, + 0.386, + 0.12 + ], + "angle": 0, + "content": "update_productquantity" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.145, + 0.713, + 0.158 + ], + "angle": 0, + "content": "async def update_product_quantity(page, product_name, new_quantity):" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.158, + 0.267, + 0.164 + ], + "angle": 0, + "content": "1 1" + }, + { + "type": "text", + "bbox": [ + 0.243, + 0.171, + 0.767, + 0.196 + ], + "angle": 0, + "content": "Update the quantity of a specified product in the Magento Admin Panel's Advanced Inventory settings." + }, + { + "type": "text", + "bbox": [ + 0.243, + 0.208, + 0.76, + 0.233 + ], + "angle": 0, + "content": "This function navigates to the product's edit page, opens the Advanced Inventory dialog," + }, + { + "type": "text", + "bbox": [ + 0.243, + 0.234, + 0.76, + 0.26 + ], + "angle": 0, + "content": "and updates the quantity to the specified value. It includes proactive checks for element availability." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.271, + 0.369, + 0.283 + ], + "angle": 0, + "content": "Initial UI State:" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.285, + 0.646, + 0.296 + ], + "angle": 0, + "content": "- The user must be logged into the Magento Admin Panel." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.297, + 0.796, + 0.322 + ], + "angle": 0, + "content": "- The function assumes the product edit page is accessible via the '/admin/catalog/product/edit/name/{product_name}' URL." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.336, + 0.28, + 0.347 + ], + "angle": 0, + "content": "Args:" + }, + { + "type": "text", + "bbox": [ + 0.243, + 0.348, + 0.757, + 0.36 + ], + "angle": 0, + "content": "page (Page): The playwright page object representing the browser page." 
+ }, + { + "type": "text", + "bbox": [ + 0.244, + 0.361, + 0.639, + 0.372 + ], + "angle": 0, + "content": "product_name(str): The name of the product to update." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.373, + 0.684, + 0.385 + ], + "angle": 0, + "content": "newquantity(int):The new quantity to set for the product." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.398, + 0.317, + 0.41 + ], + "angle": 0, + "content": "Usage Log:" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.411, + 0.728, + 0.422 + ], + "angle": 0, + "content": "- Successfully updated the quantity for 'Arcadio Gym Short' to 50." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.423, + 0.69, + 0.434 + ], + "angle": 0, + "content": "- Adjusted the quantity for 'Yoga Mat' to 100 without issues." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.436, + 0.757, + 0.447 + ], + "angle": 0, + "content": "- Encountered a timeout error when trying to update 'Sprite Yoga Mat'." + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.448, + 0.76, + 0.46 + ], + "angle": 0, + "content": "Consider checking the availability of the 'Advanced Inventory' button." 
+ }, + { + "type": "text", + "bbox": [ + 0.244, + 0.46, + 0.267, + 0.466 + ], + "angle": 0, + "content": "1 1" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.474, + 0.312, + 0.485 + ], + "angle": 0, + "content": "import re" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.487, + 0.349, + 0.498 + ], + "angle": 0, + "content": "import asyncio" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.51, + 0.743, + 0.523 + ], + "angle": 0, + "content": "await page.goto(f\"/admin/catalog/product/edit/name/{product_name}/\"" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.524, + 0.752, + 0.547 + ], + "angle": 0, + "content": "advanced_inventory_button = page.get_by-role(\"button\", name=\"Advanced Inventory\")" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.549, + 0.589, + 0.56 + ], + "angle": 0, + "content": "if await advanced_inventory_button.count() > 0:" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.562, + 0.56, + 0.574 + ], + "angle": 0, + "content": "await advanced_inventory_button.click()" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.575, + 0.28, + 0.584 + ], + "angle": 0, + "content": "else:" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.587, + 0.604, + 0.598 + ], + "angle": 0, + "content": "print(\"Advanced Inventory button not found.\")" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.6, + 0.32, + 0.61 + ], + "angle": 0, + "content": "return" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.612, + 0.685, + 0.624 + ], + "angle": 0, + "content": "qty_input = page.get_by-role(\"textbox\", name=\"[GLOBAL] Qty\")" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.625, + 0.471, + 0.636 + ], + "angle": 0, + "content": "if await qty_input.count() > 0:" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.638, + 0.56, + 0.649 + ], + "angle": 0, + "content": "await qty_input fills(str(new_quantity))" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.65, + 0.28, + 0.66 + ], + "angle": 0, + "content": "else:" + }, + { + "type": 
"text", + "bbox": [ + 0.244, + 0.662, + 0.634, + 0.674 + ], + "angle": 0, + "content": "print(\"Quantity input not found.\")" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.676, + 0.32, + 0.685 + ], + "angle": 0, + "content": "return" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.687, + 0.634, + 0.699 + ], + "angle": 0, + "content": "done_button = page.get_by-role(\"button\", name=\"Done\")" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.7, + 0.486, + 0.711 + ], + "angle": 0, + "content": "if await done_button.count() > 0:" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.713, + 0.457, + 0.724 + ], + "angle": 0, + "content": "await done_button.click()" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.726, + 0.28, + 0.735 + ], + "angle": 0, + "content": "else:" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.737, + 0.502, + 0.749 + ], + "angle": 0, + "content": "print(\"Done button not found.\")" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.751, + 0.32, + 0.76 + ], + "angle": 0, + "content": "return" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.763, + 0.634, + 0.774 + ], + "angle": 0, + "content": "save_button = page.get_by-role(\"button\", name=\"Save\")" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.775, + 0.486, + 0.786 + ], + "angle": 0, + "content": "if await save_button.count() > 0:" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.788, + 0.457, + 0.8 + ], + "angle": 0, + "content": "await save_button.click()" + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.801, + 0.307, + 0.811 + ], + "angle": 0, + "content": "else:" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.813, + 0.502, + 0.824 + ], + "angle": 0, + "content": "print(\"Save button not found.\")" + }, + { + "type": "text", + "bbox": [ + 0.274, + 0.826, + 0.32, + 0.836 + ], + "angle": 0, + "content": "return" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "35" + } + ], + [ + { + 
"type": "image", + "bbox": [ + 0.191, + 0.102, + 0.805, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.382, + 0.828, + 0.425 + ], + "angle": 0, + "content": "Figure 6: The agent is unable to successfully verify that the \"add collaborator to project\" works because it does not have access to a list of emails that can appropriately be used as test cases." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.453, + 0.456, + 0.469 + ], + "angle": 0, + "content": "D.2.2 Creating Functional Test Cases" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.48, + 0.828, + 0.565 + ], + "angle": 0, + "content": "Some skills require interacting with data stored on the website, and their success is conditioned not on whether the UI is accurately interacted with but rather whether the function inputs are realistic. In the following example, the agent attempts to learn how to add collaborators to a Gitlab repository, but is unable to because it does not have a source of real test input data. Future work could explore automatically generating test cases for agents for successful evaluation in dynamic websites." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.584, + 0.442, + 0.601 + ], + "angle": 0, + "content": "D.2.3 Ceiling of Agent Capabilities" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.613, + 0.825, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "36" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.174, + 0.102, + 0.825, + 0.646 + ], + "angle": 0, + "content": "- Successfully navigated to the 'Cars for Sale' section, applied filters, and requested quotes for multiple listings. - Encountered issues with automatic navigation using 'Next car' link, resolved by manual URL navigation. 
import asyncio await page.goto(\"https://www.cars.com/shopping/\") await page.wait_for_load_state(\"networkidle\") await close_overlays(page) await page.get_by_role(\"combobox\", name=\"Make\").select_option(make) await page.get_by-role(\"combobox\", name=\"Model\").select_option(model) show_MATCHes_button = page.get_byRole(\"button\", name=\"Show matches\") if await show_MATCHes_button.count() > 0: await show_MATCHes_button.click() else: print(\"Show matches button not found.\") return await page.wait_for_load_state(\"networkidle\") for _ in range(5): await page.get_byrole(\"textbox\",name \\(=\\) \"First name\").fill(first_name) await page.get_byrole(\"textbox\",name \\(=\\) \"Last name\").fill(last_name) await page.get_byrole(\"textbox\",name \\(=\\) \"Email\").fill(email) await page.get_byrole(\"combobox\",name \\(=\\) \"Subject\").select_option( \"Get a price quote\" ) submit_button = page.get_byrole(\"button\", name=\"Email\") if await submit_button.count() > 0: await submit_button.click() else: print(\"Submit button not found.\") return await asyncio.sleep(2) next_car_link = page.get_byrole(\"link\", name \\(=\\) \"Next car\") if await next_car_link.count() > 0: await next_car_link.click() await page.wait_for_load_state(\"networkidle\") else: print(\"Next car link not found. Navigation stopped.\") break" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.66, + 0.345, + 0.675 + ], + "angle": 0, + "content": "D.2.4 Fail to call API" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.685, + 0.828, + 0.773 + ], + "angle": 0, + "content": "The agent does not call available APIs even when they are generated during exploration. As shown in Figure 7, for the task \"Save a hamburger recipe,\" the agent should first call search Recipes_by_cuisine_type('hamburger') to obtain recipe details and then select the print option to save the recipes. 
However, the agent fails to call this API during the task execution, indicating a gap between exploration and execution phases, suggesting the need for improving the agent's policy to better utilize generated APIs." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.78, + 0.44, + 0.795 + ], + "angle": 0, + "content": "search_recipes_by_cuisine_type" + }, + { + "type": "code", + "bbox": [ + 0.212, + 0.818, + 0.792, + 0.91 + ], + "angle": 0, + "content": "async def searchrecipes_by_cuisine_type(page, cuisine_type):\n \"\" Searches for recipes based on a specified cuisine type on Cookpad. This function automates the process of searching for recipes by entering a cuisine type into the search functionality on the Cookpad homepage." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.961 + ], + "angle": 0, + "content": "37" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.272, + 0.102, + 0.725, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.315, + 0.825, + 0.345 + ], + "angle": 0, + "content": "Figure 7: Screenshot of cookpad.com. The agent fails to call search Recipes_by_cuisine_type('hamburger') to obtain recipe details." + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.368, + 0.825, + 0.699 + ], + "angle": 0, + "content": "Args: page: The Playwright page object to interact with. cuisine_type (str): The cuisine type to search for, e.g., 'Italian', 'Mexican', 'Korean', 'Chinese', 'American'. Usage log: - Successfully used to search for 'Italian' cuisine recipes, displaying the correct results. import re await page.get_by_role(\"textbox\", name=\"Search by recipe or ingredients\"). fill( cuisine_type ) search_button = page.get_byrole(\"button\", name \\(\\equiv\\) re.compile(\"Search\",re. 
IgnORECASE)) if await search_button.is Visible(): await search_button.click() else: await pagekeyboard.press(\"Enter\")" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.712, + 0.36, + 0.728 + ], + "angle": 0, + "content": "D.2.5 Wrong Parameter" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.736, + 0.828, + 0.834 + ], + "angle": 0, + "content": "In some cases, the agent correctly identifies the appropriate API but selects incorrect parameters. For example in Figure 8, in the task \"Browse recipes for gluten-free chocolate chip cookies that can be made without nuts,\" the agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts'), whereas the correct parameter should be search Recipes_by_Ingredients(page, 'chocolate chip without nuts'). This indicates that the agent needs better parameter selection logic to enhance performance." + }, + { + "type": "code", + "bbox": [ + 0.174, + 0.842, + 0.825, + 0.922 + ], + "angle": 0, + "content": "search_recipes_by Ingredients\nasync def search_recipes_by Ingredients(page, ingredients):\n \"\"" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "38" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.191, + 0.102, + 0.805, + 0.37 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.385, + 0.825, + 0.429 + ], + "angle": 0, + "content": "Figure 8: Screenshot of cookpad.com. 
The agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts') instead of the correct parameter search Recipes_by Ingredients(page, 'chocolate chip without nuts')" + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.507, + 0.825, + 0.907 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.489, + 0.948, + 0.509, + 0.96 + ], + "angle": 0, + "content": "39" + } + ], + [ + { + "type": "code", + "bbox": [ + 0.182, + 0.109, + 0.805, + 0.384 + ], + "angle": 0, + "content": "async def apply_multiple_filters(page, make, model, distance, zip_code, max_price): # Apply multiple filters on the Cars.com website including make, model, distance, ZIP code, and price range. await page.goto(\"https://www.cars.com/shopping/\") await close_overlays(page) await filterCars_by.make_and_model(page, make, model) await refine_search_by_distance_andzip(page, distance \\(\\equiv\\) distance,zip_code \\(\\equiv\\) zip_code) if max_price and max_price.strip(): price_select \\(=\\) page.locator(#make-model-max-price') if await price_select.count() \\(>0\\) : await price_select.select_option(value \\(\\equiv\\) str(max_price)) else: print(\"Price selector not found\") search_button \\(=\\) page.get_byRole(\"tabpanel\",name \\(\\equiv\\) \"Make\").locator(\"spark-button[datasearchtype \\(\\equiv\\) 'make']\") if await search_button.count() \\(>0\\) : await search_button.click() else: print(\"Search button not found in make panel\")" + }, + { + "type": "image_caption", + "bbox": [ + 0.227, + 0.403, + 0.765, + 0.421 + ], + "angle": 0, + "content": "Figure 9: An example of compositional API that calls multiple other APIs." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.444, + 0.365, + 0.46 + ], + "angle": 0, + "content": "D.3 Compositional API" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.47, + 0.389, + 0.487 + ], + "angle": 0, + "content": "D.4 Website Roadmap API" + }, + { + "type": "code", + "bbox": [ + 0.182, + 0.511, + 0.773, + 0.698 + ], + "angle": 0, + "content": "async def navigate_toCars_for_sale(page): # Navigate to the 'Cars for Sale' section on the Cars.com website, which contains car sales information. import asyncio await page.goto(\"https://www.cars.com/\" cars_for_sale_link = page.get_by-role(\"navigation\", name=\"main menu\").get_by-role( \"link\", name=\"Cars for Sale\") if await cars_for_sale_link.count() > 0: await cars_for_sale_link.click() await asyncio.sleep(5) else: print(\"Cars for Sale link is not visible.\")" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.718, + 0.828, + 0.763 + ], + "angle": 0, + "content": "Figure 10: An example of roadmap information contained in the synthesized APIs. The docstring of this API contains the corresponding functionality of this API. If the agent is new to this website, this function might require some exploration to get." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.948, + 0.51, + 0.96 + ], + "angle": 0, + "content": "40" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_origin.pdf b/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..6be2f672e5674b10263e8bca029202ded63ce18b --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/119db10f-4b86-43b8-bde3-42f792f5a6e7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96b0b3ec182bb8e938cad64392640bcfd1c0518110a62574da8c4cc365fd682c +size 3003257 diff --git a/data/2025/2504_07xxx/2504.07079/full.md b/data/2025/2504_07xxx/2504.07079/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e9b0304953c384c5fe9fc344354627bc12717682 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/full.md @@ -0,0 +1,1229 @@ +# SkillWeaver: Web Agents can Self-Improve by Discovering and Honing Skills + +Boyuan Zheng $^{1*}$ , Michael Y. Fatemi $^{2*}$ , Xiaolong Jin $^{3*}$ , Zora Zhiruo Wang $^{4}$ , Apurva Gandhi $^{4}$ , Yueqi Song $^{4}$ , Yu Gu $^{1}$ , Jayanth Srinivasa $^{5}$ , Gaowen Liu $^{5}$ , Graham Neubig $^{4}$ , Yu Su $^{1}$ + +1 The Ohio State University 2 University of Virginia 3 Purdue University + +4 Carnegie Mellon University 5 Cisco Research + +{zheng.2372, su.809}@osu.edu + +# Abstract + +To survive and thrive in complex environments, humans have evolved sophisticated self-improvement mechanisms through environment exploration, hierarchical abstraction of experiences into reusable skills, and collaborative construction of an ever-growing skill repertoire. Despite recent advancements, autonomous web agents still lack crucial self-improvement capabilities, struggling with procedural knowledge abstraction, skill refinement, and skill composition. 
In this work, we introduce SKILLWEAVER, a skill-centric framework that enables agents to self-improve by autonomously synthesizing reusable skills as APIs. Given a new website, the agent autonomously discovers skills, executes them for practice, and distills practice experiences into robust APIs. Iterative exploration continuously expands a library of lightweight, plug-and-play APIs, significantly enhancing the agent's capabilities. Experiments on WebArena and real-world websites demonstrate the efficacy of SKILLWEAVER, achieving relative success rate improvements of $31.8\%$ and $39.8\%$ , respectively. Additionally, APIs synthesized by strong agents substantially enhance weaker agents through transferable skills, yielding improvements of up to $54.3\%$ on WebArena. These results demonstrate the effectiveness of honing diverse website interactions into APIs, which can be seamlessly shared among various web agents. $^{1}$ + +# 1 Introduction + +AI agents based on large language models (LLMs) that can browse the web (Deng et al., 2023; Zhou et al., 2024a; Zheng et al., 2024) or use computers (Xie et al., 2024) like humans are rapidly rising as a new frontier of AI research and application. Despite these promising opportunities, digital environments present substantial challenges due to their inherent complexity and diversity. Website environments are highly intricate, consisting of numerous interactive elements that create large action spaces. An even greater challenge lies in developing generalist web agents capable of generalizing to out-of-distribution task types and adapting to novel websites. Existing efforts have attempted to train web agents using large-scale trajectory datasets collected across diverse websites and task types (Li et al., 2024; Pahuja et al., 2025). 
However, these agents often struggle with overfitting to specific website structures and task distributions (Li et al., 2024; Zheng et al., 2024), reducing their ability to handle previously unseen environments effectively. + +As an integral feat of human intelligence, self-improvement presents a promising solution to these challenges. Imagine a user visiting 'yelp.com' for the first time. In the beginning, she may not be familiar with the layout and functionalities provided by the website, so she + +![](images/9be3566820824df0fb4afd10c6f034a3ea92106e97fbd989b5dc6eee49f0c717.jpg) + +![](images/19d15d6e7c216d3b323d7090dd8d06675893fb6ed06369fc58e17dcdccabb40d.jpg) + +![](images/9f663991fd933a1bc51c4398502876b99a36e912bf48aed268974b20402c9bc7.jpg) + +![](images/a1cb53812cb12cd2115f6f9801c2edca9d085c54977c62ea4020be2c8ad80798.jpg) +Figure 1: An overview of the SKILLWEAKER framework. The Skill Proposal module (Stage I) identifies novel skills to practice based on observations of the environment and available APIs in the skill library. For each proposed skill, the agent executes it to generate trajectories, which are later evaluated by the reward model. If successful, the trajectory is utilized to synthesize an API (Stage II). To ensure robustness, the synthesized API undergoes testing with automatically generated test cases and debugging within the Skill Honing module (Stage III). + +![](images/4da1709e76cf9c3e41c7e70097e78dfad4e24e0974eb255b1fc58e7459842f3d.jpg) + +![](images/ac4b87be6827fc0b422426f9eb8310b6c709f5521c1d80c66dd9b72bf4771fbe.jpg) + +would explore the website and think about what tasks can be done. As she does more tasks on the website (e.g., searching for restaurants with various filters), common routines become memorized procedural knowledge (e.g., searching for high-end Korean restaurants entails three steps: type 'Korean' in the top search bar, click the 'Search' button shaped like a magnifier, and click the $$$$$' button on the search result page). 
Such routines abstracted from experiences essentially become high-level actions or skills that can be effortlessly summoned and carried out instead of re-learning things on the fly; they can even be composed into more complex routines, e.g., making a reservation at a restaurant involves first finding it. Similarly, through self-improvement, agents can build conceptual maps of the website environment, accumulate procedural knowledge as skills, compose simple skills into more complex ones, and leverage these learned skills to enhance their decision-making processes. + +A few recent studies have tried to capture some facets of humans' self-improvement capability. Traditional methods typically store skills implicitly through action trajectories, primarily leveraging them as demonstrations for in-context learning (Murty et al., 2024b) or fine-tuning (Murty et al., 2024a; Su et al., 2025; Pahuja et al., 2025). Although these trajectory-based approaches can be effective, they struggle to explicitly abstract reusable procedural knowledge, resulting in heavy training demands and limited generalization to new websites and tasks. Furthermore, continuously updating models with new trajectories introduces significant concerns such as catastrophic forgetting and sensitivity to website changes. Additionally, storing and sharing extensive memory-intensive trajectory data also poses practical challenges for knowledge transfer among agents. Efforts like Agent Workflow Memory (Wang et al., 2024e) and ICAL (Sarch et al., 2024) take this a step further by generating abstract, reusable routines. However, its natural language-based routines pose challenges for formal verification and precise composition into new workflows. Both methods also focus on online learning with access to test queries or offline learning requiring high-quality annotated demonstrations. Instead, we focus on more challenging autonomous exploration where agents propose novel tasks without external supervision. 
+ +To this end, we propose SKILLWEAVER, a skill-centric framework that enables web agents to autonomously self-improve by exploring website environments and synthesizing reusable, structured skills (§ 2). Our framework leverages the ability of LLMs to iteratively curate APIs based on feedback from the interactive environment (Wang et al., 2023a; 2024d). Specifically, SKILLWEAVER comprises a three-stage pipeline (Figure 1): (1) systematic exploration of website environments to identify potential skills, (2) practicing and converting these skills + +into robust, reusable APIs, and (3) testing and debugging to ensure API reliability during inference. + +Results on WebArena (Zhou et al., 2024a) demonstrate substantial performance improvements after equipping web agents with APIs synthesized by SKILLWEAVER. We observe relative gains in success rate from $25\%$ to $38\%$ with just 160 iterations of the three-stage pipeline. Further experiments with weaker agents revealed even more dramatic improvements, ranging from $40\%$ to $130\%$ . These results underscore two insights: (1) synthesized APIs can substantially enhance agent performance, and (2) weaker agents particularly benefit from APIs generated by more powerful counterparts, effectively distilling advanced knowledge into plug-and-play modules. To further validate the practicality of our method, we conduct experiments on real-world websites using tasks sampled from Online-Mind2Web (Xue et al., 2025; Deng et al., 2023). SKILLWEAVER achieved a $39.8\%$ relative improvement in success rate, demonstrating its effectiveness in complex, dynamic web environments. + +# 2 SkillWeaver + +Our objective is to develop a framework that enables web agents to autonomously discover skills and construct a continuously expanding skill library in the form of APIs. In our work, "API" refers to a Python function containing Playwright code for a browser automation, rather than a (for example) REST API that handles HTTP requests. 
As illustrated in Figure 1, the exploration pipeline consists of three key modules: (1) Skill Proposal: Identifying new skills (e.g., a short description such as "Identify pill using pill identifier") for the agent to practice, facilitating the continuous discovery of relevant functionalities; (2) Skill Synthesis: Practicing the proposed skills and converting practiced trajectories into reusable Python functions, encapsulating learned behaviors into structured APIs; (3) Skill Honing: Testing and debugging the synthesized APIs using feedback from the environment and the reward model to ensure reliability. + +# 2.1 Stage I: Skill Proposal + +Website environments encompass numerous underlying functionalities that require systematic exploration. LLMs have been employed as automatic curriculum (Tajwar et al., 2025) for exploring open-ended environments, like Minecraft (Wang et al., 2023a; Du et al., 2023), TextWorld (Song et al., 2024a), and household settings (Du et al., 2023). Similarly, we leverage LLMs as an automatic curriculum for discovering website functionalities, leveraging their internet-scale knowledge and HTML interpretation capabilities. To facilitate the understanding of website environments, we provide LLMs with detailed webpage observations, including screenshots, website names, URLs, and accessibility trees. Accessibility trees are particularly useful for understanding less familiar websites, such as CMS in WebArena, because they provide comprehensive structural and functional information. + +In contrast to existing approaches (Zhou et al., 2024b; Murty et al., 2024b;a; Song et al., 2024a), we emphasize skill diversity by explicitly prompting LLMs to propose novel and reusable skills beyond the current skill repertoire. To ensure efficient exploration and high success rates in subsequent skill practice and API synthesis stages, we specifically target short-horizon, reusable skills that can be completed within a single API call. 
Our approach follows a curriculum progression from simple to complex skill compositions. The LLM is instructed to propose the following three types of tasks (Prompts in Appendix § A.1): + +Procedural Tasks. Procedural tasks require a sequence of atomic actions to achieve higher-level process automation objectives. Each procedural task corresponds to a workflow comprising actions that can be generalized to complete similar tasks. For example, the task identifying pills based on imprint and color involves a workflow where values are entered into textboxes, followed by clicking a submit button to finalize the search operation. + +Navigational Tasks. Navigational tasks involve systematically exploring various sections or pages within a website. Through these tasks, agents can construct conceptual maps that capture the functionality of various webpages within the website. Examples include + +navigating to the "customer reviews" section on a product management website and accessing individual user profiles. + +Information-Seeking Tasks. Information-seeking tasks involve scraping detailed data from webpages (e.g., extracting all commits from a GitHub repository). Enumerating all available items on webpages, such as comprehensive lists of reviews or faculty members, is typically challenging and requires extensive, repetitive actions. To address this challenge, generating specialized scraping APIs enables agents to efficiently retrieve all relevant information. + +# 2.2 Stage II: Skill Synthesis + +This module aims at automatically generating robust and reusable APIs, consisting of the following three components: Skill Practice, Reward Model, and API Synthesis. A critical challenge in this process is ensuring the robustness of the generated APIs, as each individual module may introduce errors. 
We address this challenge through curriculum progression—advancing from simple to complex skill compositions—which improves accuracy since modules perform better on simpler tasks requiring fewer steps. Additionally, the subsequent Skill Honing module (Stage III) validates APIs through unit testing and debugging to ensure reliability. + +Skill Practice. Given a task proposed in Stage I, the agent takes actions to complete it. For procedural and navigational tasks, we leverage a base agent to generate actions to repeatedly attempt to complete tasks. The agent repeatedly attempts to complete the tasks proposed in the previous section, using the reward model to determine successful completion. For information-seeking tasks, we synthesize data extraction code to gather target information from webpages, incorporating any necessary navigation steps, such as retrieving historical orders or customer information. + +Reward Model. LLMs have demonstrated effectiveness as evaluators of correctness, both for answer accuracy (Zheng et al., 2023) and for determining whether action sequences successfully complete task instructions (Pan et al., 2024; Zhuge et al., 2024). We prompt an LLM to provide reward signals indicating task completion success with the following observations in the context: (1) Task description, (2) Action trajectory comprising a sequence of actions with screenshots and corresponding descriptions, (3) Environmental feedback, including code execution outcomes and observable website changes. Further details can be found in Appendix § A.5. + +API Synthesis. This module encapsulates the agent's actions from successful trajectories into a reusable and generalized API. This is performed by creating a string representation of each of the state-action pairs in the trajectory and prompting the language model to generate a Python implementation. 
We statically analyze the function for common generation mistakes, as described in Appendix S A.6, and if any are detected, we prompt the model to generate the function again. As illustrated in Figure 2, each API includes a Python implementation (including a function signature, docstring, and code body), where the docstring particularly contains a usage log recording previous executions of the function and a description of the prerequisite state + +![](images/2b099ceaf2b9e84281aa8e1a61245f4853f487f25721e12edf8c65d1e63b7264.jpg) +Figure 2: An example of a synthesized API from Drugs.com used to identify pills based on their characteristics. + +of the website for the function to execute without errors. + +# 2.3 Stage III: Skill Honing + +Despite significant efforts within the above modules to ensure the robustness of the synthesized API, it is not always guaranteed. To address this limitation, we integrate a stage to test and debug synthesized APIs. For APIs requiring no extra parameters except the by default Playwright page instance, we execute the API directly as a standalone unit test. For APIs requiring additional parameters, we leverage the LLM to generate appropriate parameter values that serve as comprehensive test cases. + +# 3 Experiments + +We conducted experiments on both WebArena (Zhou et al., 2024a) and real-world live websites to evaluate our agent. WebArena is a widely used benchmark for web agent evaluation, providing a diverse set of websites designed to simulate real-world web interactions. Real-world websites further demonstrate the performance of our method in more diverse, dynamic, and complex website environments. We perform exploration on each website environment separately to derive the API library, as described in Section § 3.4. + +# 3.1 WebArena + +WebArena is a self-hostable, sandboxed web environment designed for developing and evaluating web agents. 
It simulates real-world websites across five common application domains: e-commerce (Shopping), social forums (Reddit), collaborative software development (Gitlab), content management (CMS), and navigation (Map). This benchmark includes a total of 812 tasks, each with automated evaluation metrics based on functional correctness. WebArena provides a stable testing environment with reliable automatic evaluation, making it a good platform for demonstrating the efficacy of our agent and analyzing various aspects of our approach. Further details on the WebArena benchmark used in our evaluation can be found in Appendix § B. + +Human-Crafted APIs. The self-hosted WebArena sandbox environment grants access to its source code and administrative access for the simulated websites. This enables us to extract APIs that adhere to standardized protocols (e.g., REST) from the official documentation within the source code as well as from external sources (Song et al., 2024a). We further leverage these human-crafted official APIs to compare them against the APIs synthesized by SKILLWEAVER, demonstrating the quality of the generated APIs. + +# 3.2 Real-World Websites + +Real-world websites present greater complexity and richness than simulated environments. To demonstrate the practicality and efficacy of our approach in real-world environments, we conduct evaluations on live websites. Specifically, we use Online-Mind2Web (Xue et al., 2025; Deng et al., 2023), a benchmark specifically designed for evaluating web agents on live websites. This benchmark encompasses 300 tasks across 136 popular websites spanning diverse domains. The tasks represent realistic scenarios commonly encountered in daily web interactions, as proposed by human annotators. Considering the cost of exploration on all the websites, we only consider websites with a sufficient number of tasks that can be accessed by Playwright. 
We end up with 4 websites that have at least 8 tasks to run online evaluations (Zheng et al., 2024; Yoran et al., 2024; He et al., 2024) and include 57 tasks, listed in Appendix § C. To ensure the validity of evaluation, we manually evaluate the success of agent trajectories. Specifically, we verify whether the actions fulfill all requirements posed by the task and whether retrieved information matches the target for information-seeking tasks. + +# 3.3 Agent Implementation + +By default, we use GPT-4o$^{3}$ with a temperature of 0.3. Following the default WebArena evaluation settings, we set the maximum number of steps per iteration to 10. + +Baseline Agent. We implement a web agent with only browsing actions based on Code-Act (Wang et al., 2024b), which leverages an LLM to generate Python code using the Playwright browser automation library to interact with web environments through simple atomic actions such as click, type, and scroll. The observation space consists of webpage screenshots, an accessibility tree, and the execution results from previous actions, such as exceptions. + +Agent with Skills. To demonstrate the performance improvements gained from APIs, we extend the action space of the baseline agent by incorporating synthesized skills, enabling the agent to execute API calls. We leverage this agent to evaluate the performance improvements from both APIs synthesized by SKILLWEAVER and human-crafted APIs (Song et al., 2024b) from WebArena's official documentation. Given the extensive number of synthesized APIs, we introduce an API selection module that filters only relevant APIs from the API library. This module also removes APIs that do not satisfy pre-conditions. The prompt and skill serialization format can be found in Appendix § A.8. + +Weaker Agent. To evaluate whether the synthesized APIs can plug and play in weaker agents, we implement a weaker variant by replacing GPT-4o with GPT-4o-mini$^{4}$ while maintaining the same agent design. 
+ +# 3.4 Exploration + +During the exploration stage, we employ the agent described above to practice skills. Each website undergoes an exploration process consisting of 160 iterations with GPT-4o, where an iteration is defined as either attempting a proposed skill or testing an existing skill. Within each iteration, the agent proposes either a procedural or navigational task at the starting webpage. It can also propose an information-seeking task at the ending webpage after completing the procedural or navigational task in the previous iteration. During exploration, the agent can access APIs synthesized in the previous exploration steps after skill selection to compose more complex APIs. + +# 4 Results and Analysis + +# 4.1 Experimental Results + +WebArena Results. Our experiments on WebArena demonstrate consistent performance improvements with the integration of synthesized APIs. As illustrated in Table 1, we observe a substantial relative improvement in success rate, $39.8\%$ on average, for the baseline agent with GPT-4o and an even larger improvement of $54.3\%$ with GPT-4o-mini across the evaluated websites. + +When compared to AutoEval (Pan et al., 2024), which leverages an LLM-based reward model to guide inference-time exploration, SKILLWEAKER achieves higher average success rates and exhibits better or comparable performance across all domains, with the exception of the Shopping environment. The reason behind this exception is the inherent requirements for more extensive interaction with such websites for partially observable information, like dynamic product search results and product details. In comparison with SteP (Sodhi et al., 2024), which incorporates the external memory of domain-specific human-written workflows, SKILLWEAKER achieves better performance on CMS and Map environments, showing the promising quality of agent synthesized APIs even comparing with manually crafted workflows designed by human experts. + +
MethodGitlabMapShoppingCMSRedditAVG.
WebArena15.015.613.910.46.612.3
AutoEval25.027.539.620.920.826.9
*SteP32.030.037.024.059.033.0
SKILLWEAVER
GPT-4o17.827.519.818.737.722.6
+ Skills22.233.927.225.850.029.8
Δ↑25%↑23%↑38%↑38%↑33%↑32%
GPT-4o-mini6.110.311.83.318.99.2
+ Skills8.916.717.17.726.414.1
Δ↑46%↑62%↑46%↑133%↑40%↑45%
+ +Table 1: Task success rate on WebArena. The numbers in green represent the relative improvement percentage. + +
MethodDrugFlightCookingCarAVG.
Baseline65.011.762.511.140.2
+ Skills87.029.475.011.156.2
Δ↑34%↑151%↑20%↑0%↑40%
+ +Table 2: Success rate in real-world website evaluation based on Online-Mind2Web. + +Live Website Results. As shown in Table 2, integrating synthesized APIs leads to an average relative improvement of $39.8\%$ in success rate across four websites. For the Car website, while the final success rate remained unchanged after incorporating synthesized APIs, we observed that in four out of nine tasks, the synthesized APIs successfully guided the agent to the final state required for task completion. However, the agent still encountered failures during the final steps that demanded strong environment understanding and visual reasoning. + +# 4.2 Analysis + +Generalization across Website. Generalization across diverse websites, particularly less common websites, remains a significant challenge. SKILLWEAVER presents a new angle to this challenge by integrating website-specific skills and knowledge through external memory in synthesized skill files. SKILLWEAVER is able to encapsulate skills and knowledge on website environments into code files containing accumulated skills. + +To demonstrate the viability of such generalization, we don't include any website specific in the agent and adapt the agent to different websites by purely integrating synthesized API libraries generated during pre-hoc website explorations. As shown in both Table 1 and Table 2, our agent is not only able to achieve stable performance improvements across all 5 WebArena websites but also in the 4 real-world live websites that are more complex and noisy. + +Generalization across Agents. A critical feature of the skill repertoire is its ability to generalize across different web agents. Once the exploration process is completed, the synthesized API library can be reused to expand and enhance the action space of other agents capable of API calling. We conduct a comparative experiment by replacing the backbone language model with GPT-4o-mini without any other modification to the agent. 
+ +According to the results in Table 1, after switching the language model to GPT-4o-mini, the performance of the baseline agent is substantially weaker than other agents based on GPT-4o. However, upon integration with our synthesized APIs, the agent based on GPT-4o-mini demonstrates remarkable relative success rate improvements ranging from $40\%$ to $133\%$ across five websites. Notably, this agent even achieves a better average success rate compared to the WebArena agent based on GPT-4o, particularly in the Map, Shopping, and + +Reddit websites. These results highlight the transferability of our synthesized APIs as a plug-and-play module to enhance agents. + +As the base agents become stronger, particularly those capable of more sophisticated inference-time search (e.g., Operator), the potential of SKILLWEAVER can be further unleashed. While current agents can accomplish challenging tasks through trial-and-error (Song et al., 2024a) and search over webpages Koh et al. (2024), certain complex skills remain beyond their capabilities. For example, as illustrated in Appendix D.2.3, the task request quote_for_multiple listings requires repeated, sequential, successful searches for specific vehicles, followed by separate quote requests. This process demands both long-range planning and back-tracing capabilities, which present significant challenges for existing agents. As agents grow stronger, SKILLWEAVER will enable the synthesis of increasingly sophisticated and complex skills. + +Comparison with Human-Crafted APIs. We further conducted a comparison of the performance of agent with synthesized skills with the human-crafted APIs extracted from the official documentation and outsourcing websites (Song et al., 2024a). Based on the level of API support, websites are classified into three categories: low, medium, and high. 
+ +As shown in Figure 3, the performance of the synthesized APIs is comparable with human-crafted APIs on websites with low API support, such as Reddit, and those with medium API support, like Shopping. This result suggests that our exploration process is capable of generating APIs with quality that is comparable to or even superior to manually crafted APIs found in official documentation. On websites with high API support, such as GitLab and Maps, the performance of the synthesized APIs is not as good. + +![](images/d73876740ac36fe6ed2add813b290f0783025673f627d1b0da9c0f205484f2f1.jpg) +Figure 3: Success rate with synthesized vs. human-crafted APIs. + +# 4.3 Case Studies + +Emergence of Compositional APIs. After a certain number of exploration iterations, we observe that the pipeline begins to generate compositional APIs that call multiple simpler APIs. An example is shown in § D.3. This API is responsible for applying multiple filters to search results that involves sequentially calling another API to close a webpage overlay, followed by two additional APIs to refine the search. This emergent behavior indicates an increasing level of abstraction and efficiency in SKILLWEAKER's ability to synthesize and integrate discovered skills. + +Limitation in Skill Use. LLMs like GPT-4o are still not robust enough at API calling, and that hurts API-augmented agents like ours, even with human-crafted APIs. This challenge is even greater for weaker LLMs such as GPT-4o-mini, as illustrated in Table 1. We identify two primary categories of failures: (1) failure to identify the appropriate API and (2) generating wrong parameters. For example, SKILLWEAKER synthesized APIs to augment agents to search recipes in Cookpad.com. As demonstrated in Appendix D.2.4, the LLM is not able to identify the search Recipes_by_cuisine_type('hamburger') API to finish the task "Save a hamburger recipe". 
In Appendix D.2.5, the LLM successfully identifies the right API search_Recipes_by_Ingredients(page, 'ingredients') to complete the task "Browse recipes for gluten-free chocolate chip cookies that can be made without nuts".
With the great potentials exhibited by tool-augmented LMs (Schick et al., 2023; Wang et al., 2024c), many works explore using LLMs to make tools across math reasoning (Cai et al., 2024; Qian et al., 2023; Yuan et al., 2024a), theorem proving (Wang et al., 2024a), structured data analysis (Lu et al., 2023; Wang et al., 2024d), and digital agent automation (Wang et al., 2023a; Gu et al., 2024a; Song et al., 2024b). However, existing methods rely on a set of existing training examples to determine tool correctness (Yuan et al., 2024a), or user input queries to bootstrap the tool-making system (Wang et al., 2024d). Our work does not necessitate annotated data and enables agents to gather experiences by self-exploring the environment while synthesizing more skills. + +Self-Improvement. Many works explore collecting trajectories through experience, rating the success with a reward model, and using the results for policy training (Patel et al., 2024; Huang et al., 2023; Madaan et al., 2023; Wang et al., 2023b; Singh et al., 2024; Chen et al., 2024; Yuan et al., 2024b). These approaches can be broadly categorized into parametric and non-parametric approaches. Parametric training with exploration trajectories has been investigated using various strategies, such as supervised fine-tuning (e.g., WebGUM (Furuta et al., 2024), Patel et al. (2024), and ScribeAgent (Shen et al., 2024)) and reinforcement learning-style optimization (e.g., ETO (Song et al., 2024a) and PAE (Zhou et al., 2024b)). Non-parametric methods typically transform exploration trajectories into reusable APIs or workflows that can be composed into more complex skills, like Voyager (Wang et al., 2023a) an LLM-based lifelong learning agent that continually explores the Minecraft environment. Friday (Wu et al., 2024) also investigates self-improvement on operating systems through exploration with pre-defined learning objectives. 
For web agents, the collected trajectories can be used as examples for in-context learning (Murty et al., 2024b), provide targets for fine-tuning (Furuta et al., 2024; Patel et al., 2024; Song et al., 2024a), and be distilled into text instructions to reproduce the desired behavior (workflows) (Wang et al., 2024e). Our work focuses on enabling web agents to continuously expand their skill repertoire by autonomously exploring website environments and distilling successful trajectories into reusable skills represented as Python APIs. These APIs are created to enable the agent to operate more efficiently during subsequent exploration and task execution. + +# 6 Conclusion + +Web agents aim to automate browsing tasks to enhance human productivity across diverse digital environments. A key challenge lies in adapting to real-world websites characterized by high diversity and complexity. Inspired by the self-improvement mechanism of humans, we propose SKILLWEAYER, a skill-centric framework that enables web agents to autonomously self-improve through exploration and API synthesis. Experimental results demonstrate substantial performance improvements, with relative success rate gains of $31.8\%$ on WebArena benchmark and $39.8\%$ on real-world websites. Notably, weaker agents + +equipped with skills from more powerful counterparts showed improvements of up to $54.3\%$ , demonstrating effective knowledge distillation through lightweight, transferable modules. Our findings point to a promising future where web agents can learn to improve themselves without parameter tuning by evolving a collective skill base. + +# Acknowledgments + +We would like to extend our appreciation to colleagues from the OSU NLP group and Anqi Feng for their insightful comments. This work is supported by a gift from Cisco. 
+ +# Ethics Statement + +Generalist web agents hold the potential to automate routine web tasks, enhance user experiences, and promote web accessibility, safety concerns related to their real-world deployment are also critical. These concerns span privacy issues, such as access to users' personal profiles, and sensitive operations, such as financial transactions or application form submissions. During online evaluation, we noticed the possibility of these web agents generating harmful actions on the web, and we manually validated the safety of all the actions before execution. Exploration on live website may lead to potential safety concerns about trigger harmful actions. In case the agent is being used on a live website, we add safety instructions which guardrail the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users. It is critical for further research to thoroughly assess and mitigate the safety risks associated with web agents, ensuring they are safeguarded against producing and executing harmful actions. The code will also be released solely for research purposes, with the goal of making the web more accessible via language technologies under an OPEN-RAIL license. We are strongly against any potentially harmful use of the data or technology by any party. + +# References + +Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=qV83K9d5WB. +Hyungjoo Chae, Namyoung Kim, Kai Tzu iunn Ong, Minju Gwak, Gwanwoo Song, Ji-hoon Kim, Sunghwan Kim, Dongha Lee, and Jinyoung Yeo. Web agents with world models: Learning and leveraging environment dynamics in web navigation. ArXiv, abs/2410.13232, 2024. 
URL https://api.semanticscholar.org/CorpusID:273404026
URL https://proceedings.mlr.press/v202/du23f.html. +Hiroki Furuta, Kuang-Huei Lee, Ofir Nachum, Yutaka Matsuo, Aleksandra Faust, Shixiang Shane Gu, and Izzeddin Gur. Multimodal web navigation with instructionfinetuned foundation models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=efFmBWioSc. +Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024. +Yu Gu, Yiheng Shu, Hao Yu, Xiao Liu, Yuxiao Dong, Jie Tang, Jayanth Srinivasa, Hugo Latapie, and Yu Su. Middleware for llms: Tools are instrumental for language agents in complex environments. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 7646-7663. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.436. +Yu Gu, Kai Zhang, Yuting Ning, Boyuan Zheng, Boyu Gou, Tianci Xue, Cheng Chang, Sanjari Srivastava, Yanan Xie, Peng Qi, Huan Sun, and Yu Su. Is your llm secretly a world model of the internet? model-based planning for web agents, 2024b. URL https://arxiv.org/abs/2411.06559. +Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 6864-6890. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.ACL-LONG.371. URL https://doi.org/10.18653/v1/2024.acl-long.371. 
+Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14281-14290, 2024. +Jiaxin Huang, Shixiang Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pp. 1051-1068. Association for Computational Linguistics, 2023. doi: 10.18653/V1/2023.EMNLP-MAIN.67. URL https://doi.org/10.18653/v1/2023.emnlp-main.67. +Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. CoRR, abs/2407.01476, 2024. doi: 10.48550/ARXIV.2407.01476. URL https://doi.org/10.48550/arXiv.2407.01476. +Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, et al. Autowebglm: A large language + +model-based web navigating agent. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pp. 5295-5306, 2024. +Wei Li, William E. Bishop, Alice Li, Christopher Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. On the effects of data scale on UI control agents. In Amir Globersons, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang (eds.), Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024, 2024. URL http://papers.nips.cc/paper_files/paper/2024/bitstream/a79f3ef3b445fd4659f44648f7ea8ffd-Abstract-Datasets_and_Benchmarks_Track.html. 
+Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/black/871ed095b734818cfba48db6aab25a62-Abstract-Conference.html. +Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bit/91edff07232fb1b55a505a9e9f6c0ff3-Abstract-Conference.html. +Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator. CoRR, abs/2410.02907, 2024a. doi: 10.48550/ARXIV.2410.02907. URL https://doi.org/10.48550/arXiv.2410.02907. +Shikhar Murty, Christopher D. Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. BAGEL: bootstrapping agents by guiding exploration with language. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=VsvfSMI5bs. +Vardaan Pahuja, Yadong Lu, Corby Rosset, Boyu Gou, Arindam Mitra, Spencer Whitehead, Yu Su, and Ahmed Awadallah. 
Explorer: Scaling exploration-driven web trajectory synthesis for multimodal web agents. 2025. URL https://api.semanticscholar.org/CorpusID:276408442
In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/d842425e4bf79ba039352da0f658a906-Abstract-Conference.html. +Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/6c52a8a4fadc9129c6e1d1745f2dfd0f-Abstract-Conference.html. +Junhong Shen, Atishay Jain, Zedian Xiao, Ishan Amlekar, Mouad Hadji, Aaron Podolny, and Ameet Talwalkar. Scribeagent: Towards specialized web agents using production-scale workflow data. ArXiv, abs/2411.15004, 2024. URL https://api(semanticscholar.org/ CorpusID:274192657. +Avi Singh, John D. Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J. Liu, James Harrison, Jaehoon Lee, Kelvin Xu, Aaron T. Parisi, Abhishek Kumar, Alexander A. Alemi, Alex Rizkowsky, Azade Nova, Ben Adlam, Bernd Bohnet, Gamaleldin Fathy Elsayed, Hanie Sedghi, Igor Mordatch, Isabelle Simpson, Izzeddin Gur, Jasper Snoek, Jeffrey Pennington, Jiri Hron, Kathleen Kenealy, Kevin Swersky, Kshitteej Mahajan, Laura Culp, Lechao Xiao, Maxwell L. Bileschi, Noah Constant, Roman Novak, Rosanne Liu, Tris Warkentin, Yundi Qian, Yamini Bansal, Ethan Dyer, Behnam Neyshabur, Jascha Sohl-Dickstein, and Noah Fiedel. 
Beyond human data: Scaling self-training for problem-solving with language models. Trans. Mach. Learn. Res., 2024, 2024. URL https://openreview.net/forum?id=1NAyUngGFK. +Paloma Sodhi, S. R. K. Branavan, Yoav Artzi, and Ryan McDonald. Step: Stacked llm policies for web actions, 2024. URL https://arxiv.org/abs/2310.03720. +Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for LLM agents. CoRR, abs/2403.02502, 2024a. doi: 10.48550/ARXIV.2403.02502. URL https://doi.org/10.48550/arXiv.2403.02502. +Yueqi Song, Frank F. Xu, Shuyan Zhou, and Graham Neubig. Beyond browsing: Api-based web agents. ArXiv, abs/2410.16464, 2024b. URL https://api-semanticscholar.org/ CorpusID:273507298. +Hongjin Su, Ruoxi Sun, Jinsung Yoon, Pengcheng Yin, Tao Yu, and Sercan Ö. Arik. Learn-by-interact: A data-centric framework for self-adaptive agents in realistic environments. CoRR, abs/2501.10893, 2025. doi: 10.48550/ARXIV.2501.10893. URL https://doi.org/10.48550/arXiv.2501.10893. +Fahim Tajwar, Yiding Jiang, Abitha Thankaraj, Sumaita Sadia Rahman, J. Zico Kolter, Jeff Schneider, and Ruslan Salakhutdinov. Training a generally curious agent. CoRR, abs/2502.17543, 2025. doi: 10.48550/ARXIV.2502.17543. URL https://doi.org/10.48550/arXiv.2502.17543. +Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi (Jim) Fan, and Anima Anandkumar. Voyager: An open-ended embodied agent + +with large language models. Trans. Mach. Learn. Res., 2024, 2023a. URL https://api.sementicscholar.org/CorpusID:258887849. +Haiming Wang, Huajian Xin, Chuanyang Zheng, Zhengying Liu, Qingxing Cao, Yinya Huang, Jing Xiong, Han Shi, Enze Xie, Jian Yin, Zhenguo Li, and Xiaodan Liang. LEGO: Neural theorem proving with growing libraries. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3f5PALef5B. 
+Xingyao Wang, Yangyi Chen, Lifan Yuan, Yizhe Zhang, Yunzhu Li, Hao Peng, and Heng Ji. Executable code actions elicit better LLM agents. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=jJ9BoXAfFa. +Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Anna Rogers, Jordan L. Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pp. 13484-13508. Association for Computational Linguistics, 2023b. doi: 10.18653/V1/2023.ACL-LONG.754. URL https://doi.org/10.18653/v1/2023.acl-long.754. +Zhiruo Wang, Zhoujun Cheng, Hao Zhu, Daniel Fried, and Graham Neubig. What are tools anyway? a survey from the language model perspective. In First Conference on Language Modeling, 2024c. URL https://openreview.net/forum?id=Xh1B90iBSR. +Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. In *Forty-first International Conference on Machine Learning*, 2024d. URL https://openreview.net/forum?id=DCNCwaMJjI. +Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024e. +Zhiyong Wu, Chengcheng Han, Zichen Ding, Zhenmin Weng, Zhoumianze Liu, Shunyu Yao, Tao Yu, and Lingpeng Kong. Os-copilot: Towards generalist computer agents with self-improvement. CoRR, abs/2402.07456, 2024. doi: 10.48550/ARXIV.2402.07456. URL https://doi.org/10.48550/arXiv.2402.07456. 
+Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, Yitao Liu, Yiheng Xu, Shuyan Zhou, Silvio Savarese, Caiming Xiong, Victor Zhong, and Tao Yu. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. CoRR, abs/2404.07972, 2024. doi: 10.48550/ARXIV.2404.07972. URL https://doi.org/10.48550/arXiv.2404.07972. +Tianci Xue, Weijian Qi, Tianneng Shi, Chan Hee Song, Boyu Gou, Dawn Song, Huan Sun, and Yu Su. An illusion of progress? assessing the current state of web agents. OSU NLP Blog, Mar 2025. URL https://tinyurl.com/online-mind2web-blog. +Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/bit/82ad13ec01f9fe44c01cb91814fd7b8c-Abstract-Conference.htm1. +Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R. Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=WE_vluYUL-X. + +Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Boin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? In Conference on Empirical Methods in Natural Language Processing, 2024. URL https://api_semanticscholar.org/CorpusID:271328691. +Keen You, Haotian Zhang, Eldon Schoop, Floris Weers, Amanda Swearngin, Jeffrey Nichols, Yinfei Yang, and Zhe Gan. 
Ferret-ui: Grounded mobile UI understanding with multimodal llms. In Ales Leonardis, Elisa Ricci, Stefan Roth, Olga Russakovsky, Torsten Sattler, and Gül Varol (eds.), Computer Vision - ECCV 2024 - 18th European Conference, Milan, Italy, September 29-October 4, 2024, Proceedings, Part LXIV, volume 15122 of Lecture Notes in Computer Science, pp. 240-255. Springer, 2024. doi: 10.1007/978-3-031-73039-9\_.14. URL https://doi.org/10.1007/978-3-031-73039-9_14. +Lifan Yuan, Yangyi Chen, Xingyao Wang, Yi Fung, Hao Peng, and Heng Ji. CRAFT: Customizing LLMs by creating and retrieving from specialized toolsets. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=G0vdDSt9XM. +Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=ØNphYCmgua. +Yao Zhang, Zijian Ma, Yunpu Ma, Zhen Han, Yu Wu, and Volker Tresp. Webpilot: A versatile and autonomous multi-agent system for web task execution with strategic exploration. CoRR, abs/2408.15978, 2024. doi: 10.48550/ARXIV.2408.15978. URL https://doi.org/10.48550/arXiv.2408.15978. +Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(isdiction) is a generalist web agent, if grounded. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=piecKJ2D1B. +Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023. +Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. 
Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx. +Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Li Erran Li. Proposer-agent-evaluator(pae): Autonomous skill discovery for foundation model internet agents. CoRR, abs/2412.13194, 2024b. doi: 10.48550/ARXIV.2412.13194. URL https://doi.org/10.48550/arXiv.2412.13194. +Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, et al. Agent-as-a-judge: Evaluate agents with agents. arXiv preprint arXiv:2410.10934, 2024. + +# Table of Content: + +- Appendix A: Prompts + +- Appendix A.1: Skill Proposal +- Appendix A.2: Code Generation Agent Context Representation +- Appendix A.3: Code Generation Agent Error Detection +- Appendix A.4: Action Synthesis +- Appendix A.5: Success Checking +- Appendix A.6: Persisting Attempt to Knowledge Base +- Appendix A.7: Scraping Prompts + +- Appendix B: WebArena Benchmark +- Appendix C: Real World Website Tasks +- Appendix D: Example APIs + +- Appendix D.1: Success Cases +- Appendix D.2: Failure Cases +- Appendix D.3: Composition API. + +# A Prompts + +# A.1 Skill Proposal + +In case the agent is being used on a live website, we add safety instructions which bias the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users. + +# Proposing Tasks to Explore + +System: You propose tasks that would make good 'tools' for external users of a website. User: + +You are a 'web agent' who is learning how to use a website. 
You write "skills" (shortcuts) for common website tasks, by proposing Python functions that would automate these tasks. + +You have already proposed the following skills: + +{procedural knowledge} + + +You have built up the following knowledge about the website (in addition to the current screenshot): {semantic_knowledge} + +Now please come up with something new to learn how to do on this website. The website is structured according to the following accessibility tree hierarchy: {ax_tree} + +Do not interact with the Advanced Reporting tab if you are using +Magenta. +Do not interact with login/login/user accounts on any site. +If you're on OpenStreetMap, don't interact with community featur + +Write a list of useful skills/shortcuts that you would want to have built into a website as Python functions. Write the name in natural language format. Do not use $*\_id$ as a parameter in your skill. Again, your goal is to generate functions that would be useful "shortcuts" for users of the website, so you should prioritize generating skills that compress a couple interactions into a single function call. Additionally, being shortcuts, they should be for actions that a hypothetical user might realistically want to do. + +# Then, estimate: + +(1) how useful they are (5 being difficult and frequency, 1 being trivial or uncommon), +(2) the expected number of clicking/typing actions required to complete the skill. (calculate this by writing the list of steps and counting AFTERWARDs) + +Prefer to generate skills that are creating, modifying, or filtering/querying data on the website, as these tend to be more useful. + +Do not generate skills simply to perform single clicks. + +{safetyInstructions} + +Then, calculate the sum of these ratings for each skill. Finally, select the skill with the highest rating. + +Write your ratings in `step_by_step_reasoning`. Then, write your skill choice in `proposed_skill`. 
+ +# A.2 Code Generation Agent Context Representation + +# A.2.1 States as Accessibility Trees + +We represent the state as a tuple containing an screenshot of the page, a title, a URL, and an accessibility tree hierarchy. These are serialized by concatenating clean string representations of each of these properties. We represent each node in the accessibility hierarchy as a line containing (1) indentation representing the depth in the tree, (2) the accessible role, (3) the accessible name of the element, in quotation marks, and finally (4) any other accessibility tree information (represented as a ia- attributes in the DOM.) + +# A.2.2 Prompts + +Substrings in square brackets ([]) are omitted if not applicable. + +Code Agent Context Representation +```tcl +URL: {relative_url} +{ state_accessibility_tree_string } + +{step_by_step_reasoning} + + +{action_source} + +[ +{stdout} +] +[ +{return_value} +] +[< warnings> +{ warnings} +] +[ +{ recovered_exception} +, ...] +[ +{unrecoverable_exception} +] +URL: {relative_url} +``` + +```txt +{state_accessibility_tree_string} + +... + +URL: {relative_url} +{state_accessibility_tree_string} + + +{result} + +``` + +# A.3 Code Generation Agent Error Detection + +This is the prompt template used if we detect any errors in the agent's code. We use the following error messages: + +1. A function named act not provided: Function name must be 'act' +2. The function act does not contain a single argument, page: Function must take exactly one argument: 'page'. +3. The function function is disabled: Function 'fnname' is disabled. Please use another function. +4. Incorrect or buggy Playwright functions: Please use the 'page.get_by...().f()' functions instead of the 'page.f(selector)' functions. where $f \in \{click, fill, type\}$ . +5. 
Use of CSS selectors instead of accessibility tree selectors: Please use Accessibility Tree-centric selectors, like 'page.get_by_role()', '.nth()', instead of the CSS-style selectors like '.locator' or '.query_selector()'. +6. Blank response: You did not provide any Python code, but you also did not provide a result for 'terminate_with_result'. Please provide one or the other. +7. Type errors: Type Error: {error} + +# A.4 Action Synthesis + +# Code Agent Action Generation + +You generate Playwright code to interact with websites. Words of wisdom: + +- If you want to click a generic button (e.g., that belongs to an element), use the full .get_by-role() path to the element (e.g., .get_by-role("group", name="Test Item").get_by-role("button", name="Go") instead of .get_by-role("button", name="Go"), as this is ambiguous). +- Write exceptionally correct Python code. +- You love to take advantage of functions in the knowledge_base whenever possible. You use them via Python function calls. It is required to use the knowledge base function corresponding to an action if it exists. +- Use relative goto when you can. +are currently on a webpage titled {title_repr}, with the URL {url_repr}. +der the following Accessibility Tree. 
The accessibility tree is formatted like this: + +```handlebars +[role] ["name"] +[properties] {{{child1]}}; +[role] ["name"] +[properties]; // no children +Here is an example: + +article "Willy Wonka's Chocolate Factory Opens Its Gates" {{{link "Share";}}} +}; +article "Hogwarts Begins Accepting Applicants for Class of 2029" {{{link "Share";}}} + +To select the first link, you can do (because name = case-insensitive substring match unless exact=True is supplied): +first_link = page.get_by_role("article", name="Willy Wonka").get_by_role("link", name="Share") +Here is another example: + +article {{{header "How can I install CUDA on Ubuntu 22?"; link "Share";}}} +}; +article {{{header "How do I install Docker?"; link "Share";}}} + +To select the first link, you can do: +page.get_byRole("article").filter (has=page.get_byRole("header", "How can I install CUDA").get_byRole("link", name="Share") +Here, the .filter() is necessary to ensure that we select the correct article. This is especially important in cases where the parent element doesn't include relevant criteria for identifying a child element. Tips about this tree: +· If you see a node as a child of an iframe, you must use page.frame(name=...), and then access the node (via .get_by ROLE() or similar). +· Some elements will require interaction via .select_option(). They will be labeled as such. This is because they are HTML aka get_by_placeholder("Search GitLab") +2) > aka locator([[id=\"__BVID__158\"]]) + +Call log: + + + + + +URL: /byte blaze/accessible- html-content-patterns/-/branches + +[Omitted] + + + +The page currently displays a dialog that asks for confirmation to delete all merged branches. It requires typing 'delete' into an input box. After typing the confirmation text, the "Delete merged branches" button can be clicked to proceed. I will fill the textbox with 'delete' and then click the confirmation button to complete the task. 
+ + + + + +```python +async def act(page): # Fill the confirmation textbox with 'delete' await page.get_by_role("dialog", name="Delete all merged branches?").get_by-role("textbox").fill("delete") # Click the "Delete merged branches" button to confirm deletion await page.get_by-role("dialog", name="Delete all merged branches?").get_by-role("button", name="Delete merged branches").click() +``` + +```txt + +``` + + + +URL: /byte blaze/accessible- html-content-patterns/-/branches + +```txt +[Omitted] +``` + + + +The merged branches have been successfully deleted in the 'byteblaze/accessible-xml-content-patterns' project. + +```erb + +``` + + + +URL: /byte blaze/accessible- html-content-patterns/~/branches + +```txt +[Omitted] +``` + +Here is the fix the agent made: removing the ambiguous DOM selector for " textbox", and replacing it with one that first selects the dialog window that contains the textbox, and then the textbox element within it. + +# delete_merged_branches + +```txt +async def deletemergedbranches(page, project_path): + return (0, 1) + [0, 1] = 0 + [0, 1] = 1 + [0, 1] = 2 + [0, 1] = 3 + [0, 1] = 4 + [0, 1] = 5 + [0, 1] = 6 + [0, 1] = 7 + [0, 1] = 8 + [0, 1] = 9 + [0, 1] = 10 + [0, 1] = 11 + [0, 1] = 12 + [0, 1] = 13 + [0, 1] = 14 + [0, 1] = 15 + [0, 1] = 16 + [0, 1] = 17 + [0, 1] = 18 + [0, 1] = 19 + [0, 1] = 20 + [0, 1] = 21 + [0, 1] = 22 + [0, 1] = 23 + [0, 1] = 24 + [0, 1] = 25 + [0, 1] = 26 + [0, 1] = 27 + [0, 1] = 28 + [0, 1] = 29 + [0, 1] = 30 + [0, 1] = 31 + [0, 1] = 32 + [0, 1] = 33 + [0, 1] = 34 + [0, 1] = 35 + [0, 1] = 36 + [0, 1] = 37 + [0, 1] = 38 + [0, 1] = 39 + [0, 1] = 40 + [0, 1] = 41 + [0, 1] = 42 + [0, 1] = 43 + [0, 1] = 44 + [0, 1] = 45 + [0, 1] = 46 + [0, 1] = 47 + [0, 1] = 48 + [0, 1] = 49 + [0, 1] = 50 + [0, 1] = 51 + [0, 1] = 52 + [0, 1] = 53 + [0, 1] = 54 + [0, 1] = 55 + [0, 1] = 56 + [0, 1] = 57 + [0, 1] = 58 + [0, 1] = 59 + [0, 1] = 60 + [0, 1] = 61 + [0, 1] = 62 + [0, 1] = 63 + [0, 1] = 64 + [0, 1] = 65 + [0, 1] = 66 + [0, 1] = 
67 + [0, 1] = 68 + [0, 1] = 69 + [0, 1] = 70 + [0, 1] = 71 + [0, 1] = 72 + [0, 1] = +``` + +# D.2 Failure Cases and Limitations + +We observe some failure cases, which seem to be caused by gaps in LLM reasoning capabilities for certain situations. + +# D.2.1 Criteria for Verifying Success + +Because our criteria for a function to be "verified" was to have it be called without producing an exception, we found that occasionally, malfunctioning APIs could be marked as verified simply because they silenced all exceptions that could have occurred. This represents a measure for evaluation having unintended consequences. + +In the following example, a function was misused, and it was unable to resolve the necessary DOM elements given the page state. However, instead of improving the function's signature or adding a check to ensure the function was called correctly, the LLM adds "if" statements to simply avoid any of the atomic actions from producing an error. While this does reduce the number of exceptions, it does not improve the robustness of the API. + +# update_productquantity + +async def update_product_quantity(page, product_name, new_quantity): + +1 1 + +Update the quantity of a specified product in the Magento Admin Panel's Advanced Inventory settings. + +This function navigates to the product's edit page, opens the Advanced Inventory dialog, + +and updates the quantity to the specified value. It includes proactive checks for element availability. + +Initial UI State: + +- The user must be logged into the Magento Admin Panel. + +- The function assumes the product edit page is accessible via the '/admin/catalog/product/edit/name/{product_name}' URL. + +Args: + +page (Page): The playwright page object representing the browser page. + +product_name(str): The name of the product to update. + +newquantity(int):The new quantity to set for the product. + +Usage Log: + +- Successfully updated the quantity for 'Arcadio Gym Short' to 50. 
+ +- Adjusted the quantity for 'Yoga Mat' to 100 without issues. + +- Encountered a timeout error when trying to update 'Sprite Yoga Mat'. + +Consider checking the availability of the 'Advanced Inventory' button. + +1 1 + +import re + +import asyncio + +await page.goto(f"/admin/catalog/product/edit/name/{product_name}/" + +advanced_inventory_button = page.get_by-role("button", name="Advanced Inventory") + +if await advanced_inventory_button.count() > 0: + +await advanced_inventory_button.click() + +else: + +print("Advanced Inventory button not found.") + +return + +qty_input = page.get_by-role("textbox", name="[GLOBAL] Qty") + +if await qty_input.count() > 0: + +await qty_input fills(str(new_quantity)) + +else: + +print("Quantity input not found.") + +return + +done_button = page.get_by-role("button", name="Done") + +if await done_button.count() > 0: + +await done_button.click() + +else: + +print("Done button not found.") + +return + +save_button = page.get_by-role("button", name="Save") + +if await save_button.count() > 0: + +await save_button.click() + +else: + +print("Save button not found.") + +return + +![](images/323149586afd80fbae2733b648b2e5eb7a1c197d854cc5e6542a41dec3475c4b.jpg) +Figure 6: The agent is unable to successfully verify that the "add collaborator to project" works because it does not have access to a list of emails that can appropriately be used as test cases. + +# D.2.2 Creating Functional Test Cases + +Some skills require interacting with data stored on the website, and their success is conditioned not on whether the UI is accurately interacted with but rather whether the function inputs are realistic. In the following example, the agent attempts to learn how to add collaborators to a Gitlab repository, but is unable to because it does not have a source of real test input data. Future work could explore automatically generating test cases for agents for successful evaluation in dynamic websites. 
+ +# D.2.3 Ceiling of Agent Capabilities + +![](images/4d17a0e8318ad94249d4c8c424867fc0ef14b19c6bc9d7de9e422335dbf683c9.jpg) + +```python +- Successfully navigated to the 'Cars for Sale' section, applied filters, and requested quotes for multiple listings. - Encountered issues with automatic navigation using 'Next car' link, resolved by manual URL navigation. import asyncio await page.goto("https://www.cars.com/shopping/") await page.wait_for_load_state("networkidle") await close_overlays(page) await page.get_by_role("combobox", name="Make").select_option(make) await page.get_by-role("combobox", name="Model").select_option(model) show_MATCHes_button = page.get_byRole("button", name="Show matches") if await show_MATCHes_button.count() > 0: await show_MATCHes_button.click() else: print("Show matches button not found.") return await page.wait_for_load_state("networkidle") for _ in range(5): await page.get_byrole("textbox",name $=$ "First name").fill(first_name) await page.get_byrole("textbox",name $=$ "Last name").fill(last_name) await page.get_byrole("textbox",name $=$ "Email").fill(email) await page.get_byrole("combobox",name $=$ "Subject").select_option( "Get a price quote" ) submit_button = page.get_byrole("button", name="Email") if await submit_button.count() > 0: await submit_button.click() else: print("Submit button not found.") return await asyncio.sleep(2) next_car_link = page.get_byrole("link", name $=$ "Next car") if await next_car_link.count() > 0: await next_car_link.click() await page.wait_for_load_state("networkidle") else: print("Next car link not found. Navigation stopped.") break +``` + +# D.2.4 Fail to call API + +The agent does not call available APIs even when they are generated during exploration. As shown in Figure 7, for the task "Save a hamburger recipe," the agent should first call search Recipes_by_cuisine_type('hamburger') to obtain recipe details and then select the print option to save the recipes. 
However, the agent fails to call this API during the task execution, indicating a gap between exploration and execution phases, suggesting the need for improving the agent's policy to better utilize generated APIs. + +# search_recipes_by_cuisine_type + +```python +async def searchrecipes_by_cuisine_type(page, cuisine_type): + "" Searches for recipes based on a specified cuisine type on Cookpad. This function automates the process of searching for recipes by entering a cuisine type into the search functionality on the Cookpad homepage. +``` + +![](images/4e011efbefed4e7c4d0b6eee527d126e6082b24752a6e871c38288561817565e.jpg) +Figure 7: Screenshot of cookpad.com. The agent fails to call search Recipes_by_cuisine_type('hamburger') to obtain recipe details. + +```python +Args: page: The Playwright page object to interact with. cuisine_type (str): The cuisine type to search for, e.g., 'Italian', 'Mexican', 'Korean', 'Chinese', 'American'. Usage log: - Successfully used to search for 'Italian' cuisine recipes, displaying the correct results. import re await page.get_by_role("textbox", name="Search by recipe or ingredients"). fill( cuisine_type ) search_button = page.get_byrole("button", name $\equiv$ re.compile("Search",re. IgnORECASE)) if await search_button.is Visible(): await search_button.click() else: await pagekeyboard.press("Enter") +``` + +# D.2.5 Wrong Parameter + +In some cases, the agent correctly identifies the appropriate API but selects incorrect parameters. For example in Figure 8, in the task "Browse recipes for gluten-free chocolate chip cookies that can be made without nuts," the agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts'), whereas the correct parameter should be search Recipes_by_Ingredients(page, 'chocolate chip without nuts'). This indicates that the agent needs better parameter selection logic to enhance performance. 
+ +```python +search_recipes_by Ingredients +async def search_recipes_by Ingredients(page, ingredients): + "" +``` + +![](images/539761f5c5718c889d442e4ae9ddab9d0c240e025524852374883354e97c7718.jpg) +Figure 8: Screenshot of cookpad.com. The agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts') instead of the correct parameter search Recipes_by Ingredients(page, 'chocolate chip without nuts') + +![](images/d5dcc4475ce353b83253ee873efa9f0b7597e17fb11fdb9b5d22ade9898c4263.jpg) + +```python +async def apply_multiple_filters(page, make, model, distance, zip_code, max_price): # Apply multiple filters on the Cars.com website including make, model, distance, ZIP code, and price range. await page.goto("https://www.cars.com/shopping/") await close_overlays(page) await filterCars_by.make_and_model(page, make, model) await refine_search_by_distance_andzip(page, distance $\equiv$ distance,zip_code $\equiv$ zip_code) if max_price and max_price.strip(): price_select $=$ page.locator(#make-model-max-price') if await price_select.count() $>0$ : await price_select.select_option(value $\equiv$ str(max_price)) else: print("Price selector not found") search_button $=$ page.get_byRole("tabpanel",name $\equiv$ "Make").locator("spark-button[datasearchtype $\equiv$ 'make']") if await search_button.count() $>0$ : await search_button.click() else: print("Search button not found in make panel") +``` + +Figure 9: An example of compositional API that calls multiple other APIs. + +# D.3 Compositional API + +# D.4 Website Roadmap API + +```python +async def navigate_toCars_for_sale(page): # Navigate to the 'Cars for Sale' section on the Cars.com website, which contains car sales information. 
import asyncio await page.goto("https://www.cars.com/" cars_for_sale_link = page.get_by-role("navigation", name="main menu").get_by-role( "link", name="Cars for Sale") if await cars_for_sale_link.count() > 0: await cars_for_sale_link.click() await asyncio.sleep(5) else: print("Cars for Sale link is not visible.") +``` + +Figure 10: An example of roadmap information contained in the synthesized APIs. The docstring of this API contains the corresponding functionality of this API. If the agent is new to this website, this function might require some exploration to get. \ No newline at end of file diff --git a/data/2025/2504_07xxx/2504.07079/images/19d15d6e7c216d3b323d7090dd8d06675893fb6ed06369fc58e17dcdccabb40d.jpg b/data/2025/2504_07xxx/2504.07079/images/19d15d6e7c216d3b323d7090dd8d06675893fb6ed06369fc58e17dcdccabb40d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cd7c142353f4b60ebda89fd4989014a2a705da1 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/19d15d6e7c216d3b323d7090dd8d06675893fb6ed06369fc58e17dcdccabb40d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:680eacc69f104ea36790a682f4c5a9f65dbbb81267e48b5cc119f06241bf5aed +size 15408 diff --git a/data/2025/2504_07xxx/2504.07079/images/2b099ceaf2b9e84281aa8e1a61245f4853f487f25721e12edf8c65d1e63b7264.jpg b/data/2025/2504_07xxx/2504.07079/images/2b099ceaf2b9e84281aa8e1a61245f4853f487f25721e12edf8c65d1e63b7264.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b15a5c0d702133812a95bb44d1a8ec27cf758cad --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/2b099ceaf2b9e84281aa8e1a61245f4853f487f25721e12edf8c65d1e63b7264.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7c9796caaead2ff2935c77873d1d2e3acd58f4eec1503e545e4ecdde1a102474 +size 83308 diff --git a/data/2025/2504_07xxx/2504.07079/images/323149586afd80fbae2733b648b2e5eb7a1c197d854cc5e6542a41dec3475c4b.jpg 
b/data/2025/2504_07xxx/2504.07079/images/323149586afd80fbae2733b648b2e5eb7a1c197d854cc5e6542a41dec3475c4b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8fdbd2a6ce264ec8e3bf568b5e137363caaf08d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/323149586afd80fbae2733b648b2e5eb7a1c197d854cc5e6542a41dec3475c4b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c178648cbf24493a1b1c582b5b8f5f05d381d472b3dc5959b0da928113daa407 +size 37532 diff --git a/data/2025/2504_07xxx/2504.07079/images/47ed1605ddd3a969e6d5dbfdca82e5ae9c5c1e8d81896d04a9483865c4dffe37.jpg b/data/2025/2504_07xxx/2504.07079/images/47ed1605ddd3a969e6d5dbfdca82e5ae9c5c1e8d81896d04a9483865c4dffe37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4b8500cbc180dd3bdedfcc3226efb8bf0c940094 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/47ed1605ddd3a969e6d5dbfdca82e5ae9c5c1e8d81896d04a9483865c4dffe37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c3714d31f9be3a09d43193e22e1408b1597dc805bd3d681762673e5493530e8 +size 65610 diff --git a/data/2025/2504_07xxx/2504.07079/images/4d17a0e8318ad94249d4c8c424867fc0ef14b19c6bc9d7de9e422335dbf683c9.jpg b/data/2025/2504_07xxx/2504.07079/images/4d17a0e8318ad94249d4c8c424867fc0ef14b19c6bc9d7de9e422335dbf683c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5e14f78d1b99bb5e2d461dd7d0ca9c6688a4f168 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/4d17a0e8318ad94249d4c8c424867fc0ef14b19c6bc9d7de9e422335dbf683c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08dc96578012796265d0ffa877ff14561310e5c8a87c21597d69ae2592e9d6e8 +size 99151 diff --git a/data/2025/2504_07xxx/2504.07079/images/4da1709e76cf9c3e41c7e70097e78dfad4e24e0974eb255b1fc58e7459842f3d.jpg b/data/2025/2504_07xxx/2504.07079/images/4da1709e76cf9c3e41c7e70097e78dfad4e24e0974eb255b1fc58e7459842f3d.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..b2fb2aeeb146a72810164bedba022da631637931 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/4da1709e76cf9c3e41c7e70097e78dfad4e24e0974eb255b1fc58e7459842f3d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f1d92784b862f64b40ce956fe1823773e4d0838cf1ecd9f4276123aa577b8bf2 +size 10833 diff --git a/data/2025/2504_07xxx/2504.07079/images/4e011efbefed4e7c4d0b6eee527d126e6082b24752a6e871c38288561817565e.jpg b/data/2025/2504_07xxx/2504.07079/images/4e011efbefed4e7c4d0b6eee527d126e6082b24752a6e871c38288561817565e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6883c146a7c2275e0851d3f38a249fb26bb653f3 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/4e011efbefed4e7c4d0b6eee527d126e6082b24752a6e871c38288561817565e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a89cb0f98549d0491968d7ed913a2b6be3548c9db94f3ffca905c0947295f15d +size 48057 diff --git a/data/2025/2504_07xxx/2504.07079/images/539761f5c5718c889d442e4ae9ddab9d0c240e025524852374883354e97c7718.jpg b/data/2025/2504_07xxx/2504.07079/images/539761f5c5718c889d442e4ae9ddab9d0c240e025524852374883354e97c7718.jpg new file mode 100644 index 0000000000000000000000000000000000000000..544cb8809bf09749ffc0487de6ada0e25fed4a48 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/539761f5c5718c889d442e4ae9ddab9d0c240e025524852374883354e97c7718.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3c4f6e3e858808d471bc59324435e509d247bb669c14cbfdc8ce7c3a8fed112a +size 71716 diff --git a/data/2025/2504_07xxx/2504.07079/images/8e7988471e6a363f9dbc194c899bc75b17eb32a2274d0d88a6a8bbcaf1420121.jpg b/data/2025/2504_07xxx/2504.07079/images/8e7988471e6a363f9dbc194c899bc75b17eb32a2274d0d88a6a8bbcaf1420121.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca1aa80401d9183809892c7fc837fd48afd3ead7 --- /dev/null +++ 
b/data/2025/2504_07xxx/2504.07079/images/8e7988471e6a363f9dbc194c899bc75b17eb32a2274d0d88a6a8bbcaf1420121.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:277bd8e846f3bc435dae105ff7e29d8e71a182c9ac357cc7b7b4e4ae030a0a04 +size 65005 diff --git a/data/2025/2504_07xxx/2504.07079/images/9a75190a0248f6201d9c79bf667afaaba7b26e0e3ac183b41f269689061f86ba.jpg b/data/2025/2504_07xxx/2504.07079/images/9a75190a0248f6201d9c79bf667afaaba7b26e0e3ac183b41f269689061f86ba.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2ac9b8d775655d84828278a35b7b528fb8503cb --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/9a75190a0248f6201d9c79bf667afaaba7b26e0e3ac183b41f269689061f86ba.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:550ee7f49afb0ce52415d25f7d96e8baed42e955000ed16b2d9cabbca9174c42 +size 58968 diff --git a/data/2025/2504_07xxx/2504.07079/images/9be3566820824df0fb4afd10c6f034a3ea92106e97fbd989b5dc6eee49f0c717.jpg b/data/2025/2504_07xxx/2504.07079/images/9be3566820824df0fb4afd10c6f034a3ea92106e97fbd989b5dc6eee49f0c717.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b965b91ca46ea7945720b3ce329ada9455158d9 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/9be3566820824df0fb4afd10c6f034a3ea92106e97fbd989b5dc6eee49f0c717.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb65330e8f6b533e1353faf50fab17dad5f045ee26f1266407c6f6bd34b0666f +size 10192 diff --git a/data/2025/2504_07xxx/2504.07079/images/9f663991fd933a1bc51c4398502876b99a36e912bf48aed268974b20402c9bc7.jpg b/data/2025/2504_07xxx/2504.07079/images/9f663991fd933a1bc51c4398502876b99a36e912bf48aed268974b20402c9bc7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d34b95883ad7f32e2ea22257668c7e8456b25f8a --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/9f663991fd933a1bc51c4398502876b99a36e912bf48aed268974b20402c9bc7.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:46dffe5962cef8a665a18faa61655b6706a6b876c3d4220c62f4991eed358bf6 +size 14706 diff --git a/data/2025/2504_07xxx/2504.07079/images/a1cb53812cb12cd2115f6f9801c2edca9d085c54977c62ea4020be2c8ad80798.jpg b/data/2025/2504_07xxx/2504.07079/images/a1cb53812cb12cd2115f6f9801c2edca9d085c54977c62ea4020be2c8ad80798.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2a08cf51d264be2a7a8995b316bb127b8c522e5 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/a1cb53812cb12cd2115f6f9801c2edca9d085c54977c62ea4020be2c8ad80798.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1703a954baa08a71b9d4c323fa2d7b39e912fb4c536dca41bf066d355064e7f +size 9182 diff --git a/data/2025/2504_07xxx/2504.07079/images/ac4b87be6827fc0b422426f9eb8310b6c709f5521c1d80c66dd9b72bf4771fbe.jpg b/data/2025/2504_07xxx/2504.07079/images/ac4b87be6827fc0b422426f9eb8310b6c709f5521c1d80c66dd9b72bf4771fbe.jpg new file mode 100644 index 0000000000000000000000000000000000000000..03d80bde64e992db070b0778af98d1391f293009 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/ac4b87be6827fc0b422426f9eb8310b6c709f5521c1d80c66dd9b72bf4771fbe.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f254e596238a9aaea423aedf122d51579f36ee41010f2574cbd4edc190045515 +size 12563 diff --git a/data/2025/2504_07xxx/2504.07079/images/b05d49d4771ae8e65e5a138dd731d3ffc20e497fb71fb485d50d17f679c1e3ab.jpg b/data/2025/2504_07xxx/2504.07079/images/b05d49d4771ae8e65e5a138dd731d3ffc20e497fb71fb485d50d17f679c1e3ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1187e787e0afaacdc13213f90987f3c885181c62 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/b05d49d4771ae8e65e5a138dd731d3ffc20e497fb71fb485d50d17f679c1e3ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29c6dddf7a2c306a509d5c39aa5dac8962c2c994cb5ee501dbdcc91a34aa4ff1 +size 45808 diff --git 
a/data/2025/2504_07xxx/2504.07079/images/d5dcc4475ce353b83253ee873efa9f0b7597e17fb11fdb9b5d22ade9898c4263.jpg b/data/2025/2504_07xxx/2504.07079/images/d5dcc4475ce353b83253ee873efa9f0b7597e17fb11fdb9b5d22ade9898c4263.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d56566802e6166e11faedcad6ba57fe3b0dbaa8 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/d5dcc4475ce353b83253ee873efa9f0b7597e17fb11fdb9b5d22ade9898c4263.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e1693049b9e26178f4a19815fc239d76c8618eb6b997efbce7cfd3eaa501ae7f +size 113920 diff --git a/data/2025/2504_07xxx/2504.07079/images/d73876740ac36fe6ed2add813b290f0783025673f627d1b0da9c0f205484f2f1.jpg b/data/2025/2504_07xxx/2504.07079/images/d73876740ac36fe6ed2add813b290f0783025673f627d1b0da9c0f205484f2f1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..047aaf3f6ad7f36528529ccd62a79453b01934e7 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/d73876740ac36fe6ed2add813b290f0783025673f627d1b0da9c0f205484f2f1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a7b73b7a66a37f206aef3a40682f2002452ba50dd4c4a611d3fd03cde5219ec5 +size 33886 diff --git a/data/2025/2504_07xxx/2504.07079/images/e8e603d1e36a974a635e695df504cf8dda0a3650221c82712c73a2e047a0c081.jpg b/data/2025/2504_07xxx/2504.07079/images/e8e603d1e36a974a635e695df504cf8dda0a3650221c82712c73a2e047a0c081.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2cc40566f52caa9202b9f78e171a3f25d22b0ac4 --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/e8e603d1e36a974a635e695df504cf8dda0a3650221c82712c73a2e047a0c081.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ef7c1b7fd94f3f2a355c03d65a74d1ec8b565f39125771308dc25e932ce96412 +size 61726 diff --git a/data/2025/2504_07xxx/2504.07079/images/f15aa8a60c451bf19aa9f97d46d05672dafcd4435585dc9510373dfcb676f191.jpg 
b/data/2025/2504_07xxx/2504.07079/images/f15aa8a60c451bf19aa9f97d46d05672dafcd4435585dc9510373dfcb676f191.jpg new file mode 100644 index 0000000000000000000000000000000000000000..756b644e302f1bfea5ecc4d61adcdabefb7d05ec --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/images/f15aa8a60c451bf19aa9f97d46d05672dafcd4435585dc9510373dfcb676f191.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:71f01230c8dd50ff7aaa6a4c2fba914d9b2e3a24a246b274e2687d6d4ce551cb +size 22053 diff --git a/data/2025/2504_07xxx/2504.07079/layout.json b/data/2025/2504_07xxx/2504.07079/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..5aa637b04f82925701b11dcec5a1fda96f70944d --- /dev/null +++ b/data/2025/2504_07xxx/2504.07079/layout.json @@ -0,0 +1,22459 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 105, + 78, + 505, + 111 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 78, + 505, + 111 + ], + "spans": [ + { + "bbox": [ + 105, + 78, + 505, + 111 + ], + "type": "text", + "content": "SkillWeaver: Web Agents can Self-Improve by Discovering and Honing Skills" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "spans": [ + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": "Boyuan Zheng" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{1*}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Michael Y. 
Fatemi" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Xiaolong Jin" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{3*}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Zora Zhiruo Wang" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Apurva Gandhi" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Yueqi Song" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Yu Gu" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Jayanth Srinivasa" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Gaowen Liu" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Graham Neubig" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "text", + "content": ", Yu Su" + }, + { + "bbox": [ + 111, + 131, + 543, + 169 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 140, + 173, + 
479, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 173, + 479, + 188 + ], + "spans": [ + { + "bbox": [ + 140, + 173, + 479, + 188 + ], + "type": "text", + "content": "1 The Ohio State University 2 University of Virginia 3 Purdue University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 202, + 186, + 418, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 202, + 186, + 418, + 199 + ], + "spans": [ + { + "bbox": [ + 202, + 186, + 418, + 199 + ], + "type": "text", + "content": "4 Carnegie Mellon University 5 Cisco Research" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 257, + 199, + 380, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 199, + 380, + 210 + ], + "spans": [ + { + "bbox": [ + 257, + 199, + 380, + 210 + ], + "type": "text", + "content": "{zheng.2372, su.809}@osu.edu" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 280, + 238, + 329, + 251 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 280, + 238, + 329, + 251 + ], + "spans": [ + { + "bbox": [ + 280, + 238, + 329, + 251 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "spans": [ + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "text", + "content": "To survive and thrive in complex environments, humans have evolved sophisticated self-improvement mechanisms through environment exploration, hierarchical abstraction of experiences into reusable skills, and collaborative construction of an ever-growing skill repertoire. Despite recent advancements, autonomous web agents still lack crucial self-improvement capabilities, struggling with procedural knowledge abstraction, skill refinement, and skill composition. 
In this work, we introduce SKILLWEAKER, a skill-centric framework that enables agents to self-improve by autonomously synthesizing reusable skills as APIs. Given a new website, the agent autonomously discovers skills, executes them for practice, and distills practice experiences into robust APIs. Iterative exploration continuously expands a library of lightweight, plug-and-play APIs, significantly enhancing the agent's capabilities. Experiments on WebArena and real-world websites demonstrate the efficacy of SKILLWEAKER, achieving relative success rate improvements of " + }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "inline_equation", + "content": "31.8\\%" + }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "inline_equation", + "content": "39.8\\%" + }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "text", + "content": ", respectively. Additionally, APIs synthesized by strong agents substantially enhance weaker agents through transferable skills, yielding improvements of up to " + }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "inline_equation", + "content": "54.3\\%" + }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "text", + "content": " on WebArena. These results demonstrate the effectiveness of honing diverse website interactions into APIs, which can be seamlessly shared among various web agents." 
+ }, + { + "bbox": [ + 140, + 263, + 470, + 486 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 506, + 195, + 518 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 506, + 195, + 518 + ], + "spans": [ + { + "bbox": [ + 105, + 506, + 195, + 518 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 531, + 506, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 506, + 665 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 506, + 665 + ], + "type": "text", + "content": "AI agents based on large language models (LLMs) that can browse the web (Deng et al., 2023; Zhou et al., 2024a; Zheng et al., 2024) or use computers (Xie et al., 2024) like humans are rapidly rising as a new frontier of AI research and application. Despite these promising opportunities, digital environments present substantial challenges due to their inherent complexity and diversity. Website environments are highly intricate, consisting of numerous interactive elements that create large action spaces. An even greater challenge lies in developing generalist web agents capable of generalizing to out-of-distribution task types and adapting to novel websites. Existing efforts have attempted to train web agents using large-scale trajectory datasets collected across diverse websites and task types (Li et al., 2024; Pahuja et al., 2025). However, these agents often struggle with overfitting to specific website structures and task distributions (Li et al., 2024; Zheng et al., 2024), reducing their ability to handle previously unseen environments effectively." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 669, + 506, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 506, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 506, + 703 + ], + "type": "text", + "content": "As an integral feat of human intelligence, self-improvement presents a promising solution to these challenges. Imagine a user visiting 'yelp.com' for the first time. In the beginning, she may not be familiar with the layout and functionalities provided by the website, so she" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 219, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 219, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 219, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.07079v1 [cs.AI] 9 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 709, + 499, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 709, + 499, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 709, + 499, + 721 + ], + "type": "text", + "content": "*Equal Contribution. Work done during Michael and Xiaolong's internship at OSU NLP Group." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 721, + 440, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 440, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 440, + 731 + ], + "type": "text", + "content": "1All resources are available at https://github.com/OSU-NLP-Group/SkillWeaver." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 308, + 760 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 79, + 211, + 152 + ], + "blocks": [ + { + "bbox": [ + 106, + 79, + 211, + 152 + ], + "lines": [ + { + "bbox": [ + 106, + 79, + 211, + 152 + ], + "spans": [ + { + "bbox": [ + 106, + 79, + 211, + 152 + ], + "type": "image", + "image_path": "9be3566820824df0fb4afd10c6f034a3ea92106e97fbd989b5dc6eee49f0c717.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 213, + 79, + 366, + 152 + ], + "blocks": [ + { + "bbox": [ + 213, + 79, + 366, + 152 + ], + "lines": [ + { + "bbox": [ + 213, + 79, + 366, + 152 + ], + "spans": [ + { + "bbox": [ + 213, + 79, + 366, + 152 + ], + "type": "image", + "image_path": "19d15d6e7c216d3b323d7090dd8d06675893fb6ed06369fc58e17dcdccabb40d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 367, + 79, + 497, + 152 + ], + "blocks": [ + { + "bbox": [ + 367, + 79, + 497, + 152 + ], + "lines": [ + { + "bbox": [ + 367, + 79, + 497, + 152 + ], + "spans": [ + { + "bbox": [ + 367, + 79, + 497, + 152 + ], + "type": "image", + "image_path": "9f663991fd933a1bc51c4398502876b99a36e912bf48aed268974b20402c9bc7.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 106, + 158, + 211, + 215 + ], + "blocks": [ + { + "bbox": [ + 106, + 158, + 211, + 215 + ], + "lines": [ + { + "bbox": [ + 106, + 158, + 211, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 158, + 211, + 215 + ], + 
"type": "image", + "image_path": "a1cb53812cb12cd2115f6f9801c2edca9d085c54977c62ea4020be2c8ad80798.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 225, + 506, + 304 + ], + "lines": [ + { + "bbox": [ + 104, + 225, + 506, + 304 + ], + "spans": [ + { + "bbox": [ + 104, + 225, + 506, + 304 + ], + "type": "text", + "content": "Figure 1: An overview of the SKILLWEAKER framework. The Skill Proposal module (Stage I) identifies novel skills to practice based on observations of the environment and available APIs in the skill library. For each proposed skill, the agent executes it to generate trajectories, which are later evaluated by the reward model. If successful, the trajectory is utilized to synthesize an API (Stage II). To ensure robustness, the synthesized API undergoes testing with automatically generated test cases and debugging within the Skill Honing module (Stage III)." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 212, + 158, + 365, + 215 + ], + "blocks": [ + { + "bbox": [ + 212, + 158, + 365, + 215 + ], + "lines": [ + { + "bbox": [ + 212, + 158, + 365, + 215 + ], + "spans": [ + { + "bbox": [ + 212, + 158, + 365, + 215 + ], + "type": "image", + "image_path": "4da1709e76cf9c3e41c7e70097e78dfad4e24e0974eb255b1fc58e7459842f3d.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 367, + 158, + 495, + 215 + ], + "blocks": [ + { + "bbox": [ + 367, + 158, + 495, + 215 + ], + "lines": [ + { + "bbox": [ + 367, + 158, + 495, + 215 + ], + "spans": [ + { + "bbox": [ + 367, + 158, + 495, + 215 + ], + "type": "image", + "image_path": "ac4b87be6827fc0b422426f9eb8310b6c709f5521c1d80c66dd9b72bf4771fbe.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 324, + 506, + 449 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 324, + 506, + 449 + ], + "spans": [ + { + "bbox": [ + 104, + 324, + 506, + 449 + ], + "type": "text", + "content": "would explore the website and think about what tasks can be done. As she does more tasks on the website (e.g., searching for restaurants with various filters), common routines become memorized procedural knowledge (e.g., searching for high-end Korean restaurants entails three steps: type 'Korean' in the top search bar, click the 'Search' button shaped like a magnifier, and click the $$$$$' button on the search result page). Such routines abstracted from experiences essentially become high-level actions or skills that can be effortlessly summoned and carried out instead of re-learning things on the fly; they can even be composed into more complex routines, e.g., making a reservation at a restaurant involves first finding it. Similarly, through self-improvement, agents can build conceptual maps of the website environment, accumulate procedural knowledge as skills, compose simple skills into more complex ones, and leverage these learned skills to enhance their decision-making processes." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 451, + 506, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 506, + 631 + ], + "type": "text", + "content": "A few recent studies have tried to capture some facets of humans' self-improvement capability. Traditional methods typically store skills implicitly through action trajectories, primarily leveraging them as demonstrations for in-context learning (Murty et al., 2024b) or fine-tuning (Murty et al., 2024a; Su et al., 2025; Pahuja et al., 2025). 
Although these trajectory-based approaches can be effective, they struggle to explicitly abstract reusable procedural knowledge, resulting in heavy training demands and limited generalization to new websites and tasks. Furthermore, continuously updating models with new trajectories introduces significant concerns such as catastrophic forgetting and sensitivity to website changes. Additionally, storing and sharing extensive memory-intensive trajectory data also poses practical challenges for knowledge transfer among agents. Efforts like Agent Workflow Memory (Wang et al., 2024e) and ICAL (Sarch et al., 2024) take this a step further by generating abstract, reusable routines. However, its natural language-based routines pose challenges for formal verification and precise composition into new workflows. Both methods also focus on online learning with access to test queries or offline learning requiring high-quality annotated demonstrations. Instead, we focus on more challenging autonomous exploration where agents propose novel tasks without external supervision." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 634, + 507, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 507, + 703 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 507, + 703 + ], + "type": "text", + "content": "To this end, we propose SKILLWEAKER, a skill-centric framework that enables web agents to autonomously self-improve by exploring website environments and synthesizing reusable, structured skills (§ 2). Our framework leverages the ability of LLMs to iteratively curate APIs based on feedback from the interactive environment (Wang et al., 2023a; 2024d). 
Specifically, SKILLWEAKER comprises a three-stage pipeline (Figure 1): (1) systematic exploration of website environments to identify potential skills, (2) practicing and converting these skills" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 710, + 504, + 733 + ], + "type": "text", + "content": "For example, screenshot and HTML files consume approximately 0.3 GB per trajectory with an average of 7.3 actions in Mind2Web (Deng et al., 2023), making them hard to transfer on the fly." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "into robust, reusable APIs, and (3) testing and debugging to ensure API reliability during inference." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "text", + "content": "Results on WebArena (Zhou et al., 2024a) demonstrate substantial performance improvements after equipping web agents with APIs synthesized by SKILLWEAVER. 
We observe relative gains in success rate from " + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "inline_equation", + "content": "38\\%" + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "text", + "content": " with just 160 iterations of the three-stage pipeline. Further experiments with weaker agents revealed even more dramatic improvements, ranging from " + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "inline_equation", + "content": "130\\%" + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "text", + "content": ". These results underscore two insights: (1) synthesized APIs can substantially enhance agent performance, and (2) weaker agents particularly benefit from APIs generated by more powerful counterparts, effectively distilling advanced knowledge into plug-and-play modules. To further validate the practicality of our method, we conduct experiments on real-world websites using tasks sampled from Online-Mind2Web (Xue et al., 2025; Deng et al., 2023). SKILLWEAVER achieved a " + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "inline_equation", + "content": "39.8\\%" + }, + { + "bbox": [ + 104, + 110, + 506, + 232 + ], + "type": "text", + "content": " relative improvement in success rate, demonstrating its effectiveness in complex, dynamic web environments." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 251, + 194, + 264 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 251, + 194, + 264 + ], + "spans": [ + { + "bbox": [ + 105, + 251, + 194, + 264 + ], + "type": "text", + "content": "2 SkillWeaver" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 278, + 506, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 278, + 506, + 400 + ], + "spans": [ + { + "bbox": [ + 104, + 278, + 506, + 400 + ], + "type": "text", + "content": "Our objective is to develop a framework that enables web agents to autonomously discover skills and construct a continuously expanding skill library in the form of APIs. In our work, \"API\" refers to a Python function containing Playwright code for a browser automation, rather than a (for example) REST API that handles HTTP requests. As illustrated in Figure 1, the exploration pipeline consists of three key modules: (1) Skill Proposal: Identifying new skills (e.g., a short description such as \"Identify pill using pill identifier\") for the agent to practice, facilitating the continuous discovery of relevant functionalities; (2) Skill Synthesis: Practicing the proposed skills and converting practiced trajectories into reusable Python functions, encapsulating learned behaviors into structured APIs; (3) Skill Honing: Testing and debugging the synthesized APIs using feedback from the environment and the reward model to ensure reliability." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 417, + 231, + 430 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 417, + 231, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 417, + 231, + 430 + ], + "type": "text", + "content": "2.1 Stage I: Skill Proposal" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 439, + 507, + 551 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 439, + 507, + 551 + ], + "spans": [ + { + "bbox": [ + 104, + 439, + 507, + 551 + ], + "type": "text", + "content": "Website environments encompass numerous underlying functionalities that require systematic exploration. LLMs have been employed as automatic curriculum (Tajwar et al., 2025) for exploring open-ended environments, like Minecraft (Wang et al., 2023a; Du et al., 2023), TextWorld (Song et al., 2024a), and household settings (Du et al., 2023). Similarly, we leverage LLMs as an automatic curriculum for discovering website functionalities, leveraging their internet-scale knowledge and HTML interpretation capabilities. To facilitate the understanding of website environments, we provide LLMs with detailed webpage observations, including screenshots, website names, URLs, and accessibility trees. Accessibility trees are particularly useful for understanding less familiar websites, such as CMS in WebArena, because they provide comprehensive structural and functional information." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 555, + 505, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 505, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 505, + 634 + ], + "type": "text", + "content": "In contrast to existing approaches (Zhou et al., 2024b; Murty et al., 2024b;a; Song et al., 2024a), we emphasize skill diversity by explicitly prompting LLMs to propose novel and reusable skills beyond the current skill repertoire. 
To ensure efficient exploration and high success rates in subsequent skill practice and API synthesis stages, we specifically target short-horizon, reusable skills that can be completed within a single API call. Our approach follows a curriculum progression from simple to complex skill compositions. The LLM is instructed to propose the following three types of tasks (Prompts in Appendix § A.1):" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 637, + 506, + 694 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 637, + 506, + 694 + ], + "spans": [ + { + "bbox": [ + 104, + 637, + 506, + 694 + ], + "type": "text", + "content": "Procedural Tasks. Procedural tasks require a sequence of atomic actions to achieve higher-level process automation objectives. Each procedural task corresponds to a workflow comprising actions that can be generalized to complete similar tasks. For example, the task identifying pills based on imprint and color involves a workflow where values are entered into textboxes, followed by clicking a submit button to finalize the search operation." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 698, + 505, + 733 + ], + "type": "text", + "content": "Navigational Tasks. Navigational tasks involve systematically exploring various sections or pages within a website. Through these tasks, agents can construct conceptual maps that capture the functionality of various webpages within the website. 
Examples include" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 105 + ], + "type": "text", + "content": "navigating to the \"customer reviews\" section on a product management website and accessing individual user profiles." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 504, + 166 + ], + "type": "text", + "content": "Information-Seeking Tasks. Information-seeking tasks involve scraping detailed data from webpages (e.g., extracting all commits from a GitHub repository). Enumerating all available items on webpages, such as comprehensive lists of reviews or faculty members, is typically challenging and requires extensive, repetitive actions. To address this challenge, generating specialized scraping APIs enables agents to efficiently retrieve all relevant information." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 186, + 240, + 198 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 186, + 240, + 198 + ], + "spans": [ + { + "bbox": [ + 105, + 186, + 240, + 198 + ], + "type": "text", + "content": "2.2 Stage II: Skill Synthesis" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 209, + 506, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 506, + 299 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 506, + 299 + ], + "type": "text", + "content": "This module aims at automatically generating robust and reusable APIs, consisting of the following three components: Skill Practice, Reward Model, and API Synthesis. A critical challenge in this process is ensuring the robustness of the generated APIs, as each individual module may introduce errors. We address this challenge through curriculum progression—advancing from simple to complex skill compositions—which improves accuracy since modules perform better on simpler tasks requiring fewer steps. Additionally, the subsequent Skill Honing module (Stage III) validates APIs through unit testing and debugging to ensure reliability." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 302, + 504, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 302, + 504, + 380 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 504, + 380 + ], + "type": "text", + "content": "Skill Practice. Given a task proposed in Stage I, the agent takes actions to complete it. For procedural and navigational tasks, we leverage a base agent to generate actions to repeatedly attempt to complete tasks. The agent repeatedly attempts to complete the tasks proposed in the previous section, using the reward model to determine successful completion. 
For information-seeking tasks, we synthesize data extraction code to gather target information from webpages, incorporating any necessary navigation steps, such as retrieving historical orders or customer information." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 385, + 506, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 385, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 385, + 506, + 475 + ], + "type": "text", + "content": "Reward Model. LLMs have demonstrated effectiveness as evaluators of correctness, both for answer accuracy (Zheng et al., 2023) and for determining whether action sequences successfully complete task instructions (Pan et al., 2024; Zhuge et al., 2024). We prompt an LLM to provide reward signals indicating task completion success with the following observations in the context: (1) Task description, (2) Action trajectory comprising a sequence of actions with screenshots and corresponding descriptions, (3) Environmental feedback, including code execution outcomes and observable website changes. Further details can be found in Appendix § A.5." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 479, + 258, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 479, + 258, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 479, + 258, + 733 + ], + "type": "text", + "content": "API Synthesis. This module encapsulates the agent's actions from successful trajectories into a reusable and generalized API. This is performed by creating a string representation of each of the state-action pairs in the trajectory and prompting the language model to generate a Python implementation. We statically analyze the function for common generation mistakes, as described in Appendix S A.6, and if any are detected, we prompt the model to generate the function again. 
As illustrated in Figure 2, each API includes a Python implementation (including a function signature, docstring, and code body), where the docstring particularly contains a usage log recording previous executions of the function and a description of the prerequisite state" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 262, + 481, + 507, + 698 + ], + "blocks": [ + { + "bbox": [ + 262, + 481, + 507, + 698 + ], + "lines": [ + { + "bbox": [ + 262, + 481, + 507, + 698 + ], + "spans": [ + { + "bbox": [ + 262, + 481, + 507, + 698 + ], + "type": "image", + "image_path": "2b099ceaf2b9e84281aa8e1a61245f4853f487f25721e12edf8c65d1e63b7264.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 263, + 713, + 506, + 746 + ], + "lines": [ + { + "bbox": [ + 263, + 713, + 506, + 746 + ], + "spans": [ + { + "bbox": [ + 263, + 713, + 506, + 746 + ], + "type": "text", + "content": "Figure 2: An example of a synthesized API from Drugs.com used to identify pills based on their characteristics." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 257, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 257, + 105 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 257, + 105 + ], + "type": "text", + "content": "of the website for the function to execute without errors." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 122, + 234, + 136 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 122, + 234, + 136 + ], + "spans": [ + { + "bbox": [ + 105, + 122, + 234, + 136 + ], + "type": "text", + "content": "2.3 Stage III: Skill Honing" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 145, + 506, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 145, + 506, + 213 + ], + "spans": [ + { + "bbox": [ + 104, + 145, + 506, + 213 + ], + "type": "text", + "content": "Despite significant efforts within the above modules to ensure the robustness of the synthesized API, it is not always guaranteed. To address this limitation, we integrate a stage to test and debug synthesized APIs. For APIs requiring no extra parameters except the by default Playwright page instance, we execute the API directly as a standalone unit test. For APIs requiring additional parameters, we leverage the LLM to generate appropriate parameter values that serve as comprehensive test cases." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 232, + 195, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 232, + 195, + 247 + ], + "spans": [ + { + "bbox": [ + 105, + 232, + 195, + 247 + ], + "type": "text", + "content": "3 Experiments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 259, + 506, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 259, + 506, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 259, + 506, + 327 + ], + "type": "text", + "content": "We conducted experiments on both WebArena (Zhou et al., 2024a) and real-world live websites to evaluate our agent. WebArena is a widely used benchmark for web agent evaluation, providing a diverse set of websites designed to simulate real-world web interactions. 
Real-world websites further demonstrate the performance of our method in more diverse, dynamic, and complex website environments. We perform exploration on each website environment separately to derive the API library, as described in Section § 3.4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 344, + 179, + 356 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 344, + 179, + 356 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 179, + 356 + ], + "type": "text", + "content": "3.1 WebArena" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "spans": [ + { + "bbox": [ + 104, + 366, + 506, + 467 + ], + "type": "text", + "content": "WebArena is a self-hostable, sandboxed web environment designed for developing and evaluating web agents. It simulates real-world websites across five common application domains: e-commerce (Shopping), social forums (Reddit), collaborative software development (Gitlab), content management (CMS), and navigation (Map). This benchmark includes a total of 812 tasks, each with automated evaluation metrics based on functional correctness. WebArena provides a stable testing environment with reliable automatic evaluation, making it a good platform for demonstrating the efficacy of our agent and analyzing various aspects of our approach. Further details on the WebArena benchmark used in our evaluation can be found in Appendix § B." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 471, + 506, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 471, + 506, + 538 + ], + "spans": [ + { + "bbox": [ + 104, + 471, + 506, + 538 + ], + "type": "text", + "content": "Human-Crafted APIs. The self-hosted WebArena sandbox environment grants access to its source code and administrative access for the simulated websites. 
This enables us to extract APIs that adhere to standardized protocols (e.g., REST) from the official documentation within the source code as well as from external sources (Song et al., 2024a). We further leverage these human-crafted official APIs to compare them against the APIs synthesized by SKILLWEAVER, demonstrating the quality of the generated APIs." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 555, + 227, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 227, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 227, + 567 + ], + "type": "text", + "content": "3.2 Real-World Websites" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 578, + 507, + 732 + ], + "type": "text", + "content": "Real-world websites present greater complexity and richness than simulated environments. To demonstrate the practicality and efficacy of our approach in real-world environments, we conduct evaluations on live websites. Specifically, we use Online-Mind2Web (Xue et al., 2025; Deng et al., 2023), a benchmark specifically designed for evaluating web agents on live websites. This benchmark encompasses 300 tasks across 136 popular websites spanning diverse domains. The tasks represent realistic scenarios commonly encountered in daily web interactions, as proposed by human annotators. Considering the cost of exploration on all the websites, we only consider websites with a sufficient number of tasks that can be accessed by Playwright. We end up with 4 websites that have at least 8 tasks to run online evaluations (Zheng et al., 2024; Yoran et al., 2024; He et al., 2024) and include 57 tasks, listed in Appendix § C. To ensure the validity of evaluation, we manually evaluate the success of agent trajectories. 
Specifically, we verify whether the actions fulfill all requirements posed by the task and whether retrieved information matches the target for information-seeking tasks." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 234, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 234, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 234, + 95 + ], + "type": "text", + "content": "3.3 Agent Implementation" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 105, + 504, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 105, + 504, + 129 + ], + "spans": [ + { + "bbox": [ + 104, + 105, + 504, + 129 + ], + "type": "text", + "content": "By default, we use GPT-4o3 with a temperature of 0.3. Following the default WebArena evaluation settings, we set the maximum number of steps per iteration to 10." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 133, + 506, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 506, + 201 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 506, + 201 + ], + "type": "text", + "content": "Baseline Agent. We implement a web agent with only browsing actions based on Code-Act (Wang et al., 2024b), which leverages an LLM to generate Python code using the Playwright browser automation library to interact with web environments through simple atomic actions such as click, type, and scroll. 
The observation space consists of webpage screenshots, an accessibility tree, and the execution results from previous actions, such as exceptions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 205, + 506, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 205, + 506, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 205, + 506, + 294 + ], + "type": "text", + "content": "Agent with Skills. To demonstrate the performance improvements gained from APIs, we extend the action space of the baseline agent by incorporating synthesized skills, enabling the agent to execute API calls. We leverage this agent to evaluate the performance improvements from both APIs synthesized by SKILLWEAVER and human-crafted APIs (Song et al., 2024b) from WebArena's official documentation. Given the extensive number of synthesized APIs, we introduce an API selection module that filters only relevant APIs from the API library. This module also removes APIs that do not satisfy pre-conditions. The prompt and skill serialization format can be found in Appendix § A.8." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 298, + 505, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 505, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 505, + 335 + ], + "type": "text", + "content": "Weaker Agent. To evaluate whether the synthesized APIs can plug and play in weaker agents, we implement a weaker variant by replacing GPT-4o with GPT-4o-mini4 while maintaining the same agent design." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 350, + 185, + 363 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 350, + 185, + 363 + ], + "spans": [ + { + "bbox": [ + 105, + 350, + 185, + 363 + ], + "type": "text", + "content": "3.4 Exploration" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 371, + 506, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 371, + 506, + 461 + ], + "spans": [ + { + "bbox": [ + 104, + 371, + 506, + 461 + ], + "type": "text", + "content": "During the exploration stage, we employ the agent described above to practice skills. Each website undergoes an exploration process consisting of 160 iterations with GPT-4o, where an iteration is defined as either attempting a proposed skill or testing an existing skill. Within each iteration, the agent proposes either a procedural or navigational task at the starting webpage. It can also propose an information-seeking task at the ending webpage after completing the procedural or navigational task in the previous iteration. During exploration, the agent can access APIs synthesized in the previous exploration steps after skill selection to compose more complex APIs." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 478, + 242, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 478, + 242, + 493 + ], + "spans": [ + { + "bbox": [ + 105, + 478, + 242, + 493 + ], + "type": "text", + "content": "4 Results and Analysis" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 505, + 228, + 517 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 505, + 228, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 505, + 228, + 517 + ], + "type": "text", + "content": "4.1 Experimental Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 525, + 504, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 525, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 104, + 525, + 504, + 582 + ], + "type": "text", + "content": "WebArena Results. Our experiments on WebArena demonstrate consistent performance improvements with the integration of synthesized APIs. As illustrated in Table 1, we observe a substantial relative improvement in success rate, " + }, + { + "bbox": [ + 104, + 525, + 504, + 582 + ], + "type": "inline_equation", + "content": "39.8\\%" + }, + { + "bbox": [ + 104, + 525, + 504, + 582 + ], + "type": "text", + "content": " on average, for the baseline agent with GPT-4o and an even larger improvement of " + }, + { + "bbox": [ + 104, + 525, + 504, + 582 + ], + "type": "inline_equation", + "content": "54.3\\%" + }, + { + "bbox": [ + 104, + 525, + 504, + 582 + ], + "type": "text", + "content": " with GPT-4o-mini across the evaluated websites." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 586, + 506, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 586, + 506, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 586, + 506, + 698 + ], + "type": "text", + "content": "When compared to AutoEval (Pan et al., 2024), which leverages an LLM-based reward model to guide inference-time exploration, SKILLWEAKER achieves higher average success rates and exhibits better or comparable performance across all domains, with the exception of the Shopping environment. The reason behind this exception is the inherent requirements for more extensive interaction with such websites for partially observable information, like dynamic product search results and product details. In comparison with SteP (Sodhi et al., 2024), which incorporates the external memory of domain-specific human-written workflows, SKILLWEAKER achieves better performance on CMS and Map environments, showing the promising quality of agent synthesized APIs even comparing with manually crafted workflows designed by human experts." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 116, + 708, + 269, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 708, + 269, + 721 + ], + "spans": [ + { + "bbox": [ + 116, + 708, + 269, + 721 + ], + "type": "text", + "content": "3Azure Endpoint: gpt-4o-2024-08-06" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 118, + 721, + 291, + 731 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 721, + 291, + 731 + ], + "spans": [ + { + "bbox": [ + 118, + 721, + 291, + 731 + ], + "type": "text", + "content": "4Azure Endpoint: gpt-4o-mini-2024-07-18" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 173, + 85, + 438, + 219 + ], + "blocks": [ + { + "bbox": [ + 173, + 85, + 438, + 219 + ], + "lines": [ + { + "bbox": [ + 173, + 85, + 438, + 219 + ], + "spans": [ + { + "bbox": [ + 173, + 85, + 438, + 219 + ], + "type": "table", + "html": "
MethodGitlabMapShoppingCMSRedditAVG.
WebArena15.015.613.910.46.612.3
AutoEval25.027.539.620.920.826.9
*SteP32.030.037.024.059.033.0
SKILLWEAVER
GPT-4o17.827.519.818.737.722.6
+ Skills22.233.927.225.850.029.8
Δ↑25%↑23%↑38%↑38%↑33%↑32%
GPT-4o-mini6.110.311.83.318.99.2
+ Skills8.916.717.17.726.414.1
Δ↑46%↑62%↑46%↑133%↑40%↑45%
", + "image_path": "e8e603d1e36a974a635e695df504cf8dda0a3650221c82712c73a2e047a0c081.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 192, + 262, + 418, + 316 + ], + "blocks": [ + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "lines": [ + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "spans": [ + { + "bbox": [ + 104, + 227, + 504, + 251 + ], + "type": "text", + "content": "Table 1: Task success rate on WebArena. The numbers in green represent the relative improvement percentage." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 192, + 262, + 418, + 316 + ], + "lines": [ + { + "bbox": [ + 192, + 262, + 418, + 316 + ], + "spans": [ + { + "bbox": [ + 192, + 262, + 418, + 316 + ], + "type": "table", + "html": "
MethodDrugFlightCookingCarAVG.
Baseline65.011.762.511.140.2
+ Skills87.029.475.011.156.2
Δ↑34%↑151%↑20%↑0%↑40%
", + "image_path": "f15aa8a60c451bf19aa9f97d46d05672dafcd4435585dc9510373dfcb676f191.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 324, + 488, + 335 + ], + "lines": [ + { + "bbox": [ + 121, + 324, + 488, + 335 + ], + "spans": [ + { + "bbox": [ + 121, + 324, + 488, + 335 + ], + "type": "text", + "content": "Table 2: Success rate in real-world website evaluation based on Online-Mind2Web." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 356, + 506, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 506, + 435 + ], + "type": "text", + "content": "Live Website Results. As shown in Table 2, integrating synthesized APIs leads to an average relative improvement of " + }, + { + "bbox": [ + 104, + 356, + 506, + 435 + ], + "type": "inline_equation", + "content": "39.8\\%" + }, + { + "bbox": [ + 104, + 356, + 506, + 435 + ], + "type": "text", + "content": " in success rate across four websites. For the Car website, while the final success rate remained unchanged after incorporating synthesized APIs, we observed that in four out of nine tasks, the synthesized APIs successfully guided the agent to the final state required for task completion. However, the agent still encountered failures during the final steps that demanded strong environment understanding and visual reasoning." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 450, + 171, + 463 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 450, + 171, + 463 + ], + "spans": [ + { + "bbox": [ + 105, + 450, + 171, + 463 + ], + "type": "text", + "content": "4.2 Analysis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 472, + 504, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 472, + 504, + 529 + ], + "spans": [ + { + "bbox": [ + 104, + 472, + 504, + 529 + ], + "type": "text", + "content": "Generalization across Website. Generalization across diverse websites, particularly less common websites, remains a significant challenge. SKILLWEAVER presents a new angle to this challenge by integrating website-specific skills and knowledge through external memory in synthesized skill files. SKILLWEAVER is able to encapsulate skills and knowledge on website environments into code files containing accumulated skills." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 533, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 533, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 533, + 504, + 601 + ], + "type": "text", + "content": "To demonstrate the viability of such generalization, we don't include any website specific in the agent and adapt the agent to different websites by purely integrating synthesized API libraries generated during pre-hoc website explorations. As shown in both Table 1 and Table 2, our agent is not only able to achieve stable performance improvements across all 5 WebArena websites but also in the 4 real-world live websites that are more complex and noisy." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 504, + 662 + ], + "type": "text", + "content": "Generalization across Agents. A critical feature of the skill repertoire is its ability to generalize across different web agents. Once the exploration process is completed, the synthesized API library can be reused to expand and enhance the action space of other agents capable of API calling. We conduct a comparative experiment by replacing the backbone language model with GPT-4o-mini without any other modification to the agent." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "content": "According to the results in Table 1, after switching the language model to GPT-4o-mini, the performance of the baseline agent is substantially weaker than other agents based on GPT-4o. However, upon integration with our synthesized APIs, the agent based on GPT-4o-mini demonstrates remarkable relative success rate improvements ranging from " + }, + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "inline_equation", + "content": "40\\%" + }, + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "inline_equation", + "content": "133\\%" + }, + { + "bbox": [ + 104, + 665, + 507, + 734 + ], + "type": "text", + "content": " across five websites. 
Notably, this agent even achieves a better average success rate compared to the WebArena agent based on GPT-4o, particularly in the Map, Shopping, and" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 301, + 750, + 309, + 760 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 106 + ], + "type": "text", + "content": "Reddit websites. These results highlight the transferability of our synthesized APIs as a plug-and-play module to enhance agents." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 222 + ], + "type": "text", + "content": "As the base agents become stronger, particularly those capable of more sophisticated inference-time search (e.g., Operator), the potential of SKILLWEAVER can be further unleashed. While current agents can accomplish challenging tasks through trial-and-error (Song et al., 2024a) and search over webpages Koh et al. (2024), certain complex skills remain beyond their capabilities. For example, as illustrated in Appendix D.2.3, the task request quote_for_multiple listings requires repeated, sequential, successful searches for specific vehicles, followed by separate quote requests. This process demands both long-range planning and back-tracing capabilities, which present significant challenges for existing agents. 
As agents grow stronger, SKILLWEAVER will enable the synthesis of increasingly sophisticated and complex skills." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 225, + 506, + 271 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 225, + 506, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 225, + 506, + 271 + ], + "type": "text", + "content": "Comparison with Human-Crafted APIs. We further conducted a comparison of the performance of agent with synthesized skills with the human-crafted APIs extracted from the official documentation and outsourcing websites (Song et al., 2024a). Based on the level of API support, websites are classified into three categories: low, medium, and high." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 275, + 244, + 475 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 275, + 244, + 475 + ], + "spans": [ + { + "bbox": [ + 104, + 275, + 244, + 475 + ], + "type": "text", + "content": "As shown in Figure 3, the performance of the synthesized APIs is comparable with human-crafted APIs on websites with low API support, such as Reddit, and those with medium API support, like Shopping. This result suggests that our exploration process is capable of generating APIs with quality that is comparable to or even superior to manually crafted APIs found in official documentation. On websites with high API support, such as GitLab and Maps, the performance of the synthesized APIs is not as good." 
+ } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 249, + 279, + 504, + 446 + ], + "blocks": [ + { + "bbox": [ + 249, + 279, + 504, + 446 + ], + "lines": [ + { + "bbox": [ + 249, + 279, + 504, + 446 + ], + "spans": [ + { + "bbox": [ + 249, + 279, + 504, + 446 + ], + "type": "image", + "image_path": "d73876740ac36fe6ed2add813b290f0783025673f627d1b0da9c0f205484f2f1.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 247, + 449, + 504, + 471 + ], + "lines": [ + { + "bbox": [ + 247, + 449, + 504, + 471 + ], + "spans": [ + { + "bbox": [ + 247, + 449, + 504, + 471 + ], + "type": "text", + "content": "Figure 3: Success rate with synthesized vs. human-crafted APIs." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 494, + 189, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 494, + 189, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 189, + 504 + ], + "type": "text", + "content": "4.3 Case Studies" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "spans": [ + { + "bbox": [ + 104, + 517, + 506, + 595 + ], + "type": "text", + "content": "Emergence of Compositional APIs. After a certain number of exploration iterations, we observe that the pipeline begins to generate compositional APIs that call multiple simpler APIs. An example is shown in § D.3. This API is responsible for applying multiple filters to search results that involves sequentially calling another API to close a webpage overlay, followed by two additional APIs to refine the search. This emergent behavior indicates an increasing level of abstraction and efficiency in SKILLWEAKER's ability to synthesize and integrate discovered skills." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 599, + 506, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 506, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 506, + 733 + ], + "type": "text", + "content": "Limitation in Skill Use. LLMs like GPT-4o are still not robust enough at API calling, and that hurts API-augmented agents like ours, even with human-crafted APIs. This challenge is even greater for weaker LLMs such as GPT-4o-mini, as illustrated in Table 1. We identify two primary categories of failures: (1) failure to identify the appropriate API and (2) generating wrong parameters. For example, SKILLWEAKER synthesized APIs to augment agents to search recipes in Cookpad.com. As demonstrated in Appendix D.2.4, the LLM is not able to identify the search Recipes_by_cuisine_type('hamburger') API to finish the task \"Save a hamburger recipe\". In Appendix D.2.5, the LLM successfully identifies the right API search Recipes_by Ingredients(page, 'ingredients') to complete the task \"Browse recipes for gluten-free chocolate chip cookies that can be made without nuts\". However, it generates the wrong keyword 'chocolate chip, -nuts' instead of 'chocolate chip without nuts', leading to empty search results." 
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 201, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 201, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 201, + 94 + ], + "type": "text", + "content": "5 Related Work" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 106, + 507, + 283 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 106, + 507, + 283 + ], + "spans": [ + { + "bbox": [ + 107, + 106, + 507, + 283 + ], + "type": "text", + "content": "Web Agents. Automated web agents have recently emerged as a critical research direction with the vision of automating workflows in the internet. Substantial progress in web agents has been achieved by leveraging powerful LLMs alongside diverse prompting strategies to automate the prediction of web page actions (Zheng et al., 2024; Furuta et al., 2024; Zhou et al., 2024a; Yao et al., 2022). Given inputs like HTML content, accessibility trees, and screenshots, LLM-based approaches use in-context examples, skill usage, and the ReAct framework (Yao et al., 2023) to improve action prediction. These models can be further enhanced with action-coordinate pair data for better grounding (Cheng et al., 2024; Gou et al., 2024; You et al., 2024), learning from multiple web agent trajectories to improve multi-step reasoning and planning (Lai et al., 2024; Shaw et al., 2023; Deng et al., 2023), or training the agent to directly output pixel coordinates (Hong et al., 2024). Another line of work uses planning. 
Tree search agent (Koh et al., 2024) performs a best-first tree search using an LLM as a heuristic, and AgentQ (Putta et al., 2024) and WebPilot (Zhang et al., 2024) employ Monte Carlo Tree Search. The World-model-augmented web agent (Chae et al., 2024) and WebDreamer (Gu et al., 2024b) integrate LLM-based world models to anticipate the outcomes of its actions." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 287, + 506, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 287, + 506, + 389 + ], + "spans": [ + { + "bbox": [ + 104, + 287, + 506, + 389 + ], + "type": "text", + "content": "Tool Use and Synthesis. With the great potentials exhibited by tool-augmented LMs (Schick et al., 2023; Wang et al., 2024c), many works explore using LLMs to make tools across math reasoning (Cai et al., 2024; Qian et al., 2023; Yuan et al., 2024a), theorem proving (Wang et al., 2024a), structured data analysis (Lu et al., 2023; Wang et al., 2024d), and digital agent automation (Wang et al., 2023a; Gu et al., 2024a; Song et al., 2024b). However, existing methods rely on a set of existing training examples to determine tool correctness (Yuan et al., 2024a), or user input queries to bootstrap the tool-making system (Wang et al., 2024d). Our work does not necessitate annotated data and enables agents to gather experiences by self-exploring the environment while synthesizing more skills." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 392, + 507, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 392, + 507, + 615 + ], + "spans": [ + { + "bbox": [ + 107, + 392, + 507, + 615 + ], + "type": "text", + "content": "Self-Improvement. 
Many works explore collecting trajectories through experience, rating the success with a reward model, and using the results for policy training (Patel et al., 2024; Huang et al., 2023; Madaan et al., 2023; Wang et al., 2023b; Singh et al., 2024; Chen et al., 2024; Yuan et al., 2024b). These approaches can be broadly categorized into parametric and non-parametric approaches. Parametric training with exploration trajectories has been investigated using various strategies, such as supervised fine-tuning (e.g., WebGUM (Furuta et al., 2024), Patel et al. (2024), and ScribeAgent (Shen et al., 2024)) and reinforcement learning-style optimization (e.g., ETO (Song et al., 2024a) and PAE (Zhou et al., 2024b)). Non-parametric methods typically transform exploration trajectories into reusable APIs or workflows that can be composed into more complex skills, like Voyager (Wang et al., 2023a) an LLM-based lifelong learning agent that continually explores the Minecraft environment. Friday (Wu et al., 2024) also investigates self-improvement on operating systems through exploration with pre-defined learning objectives. For web agents, the collected trajectories can be used as examples for in-context learning (Murty et al., 2024b), provide targets for fine-tuning (Furuta et al., 2024; Patel et al., 2024; Song et al., 2024a), and be distilled into text instructions to reproduce the desired behavior (workflows) (Wang et al., 2024e). Our work focuses on enabling web agents to continuously expand their skill repertoire by autonomously exploring website environments and distilling successful trajectories into reusable skills represented as Python APIs. These APIs are created to enable the agent to operate more efficiently during subsequent exploration and task execution." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 628, + 189, + 641 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 628, + 189, + 641 + ], + "spans": [ + { + "bbox": [ + 105, + 628, + 189, + 641 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 654, + 507, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 654, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 104, + 654, + 507, + 734 + ], + "type": "text", + "content": "Web agents aim to automate browsing tasks to enhance human productivity across diverse digital environments. A key challenge lies in adapting to real-world websites characterized by high diversity and complexity. Inspired by the self-improvement mechanism of humans, we propose SKILLWEAYER, a skill-centric framework that enables web agents to autonomously self-improve through exploration and API synthesis. Experimental results demonstrate substantial performance improvements, with relative success rate gains of " + }, + { + "bbox": [ + 104, + 654, + 507, + 734 + ], + "type": "inline_equation", + "content": "31.8\\%" + }, + { + "bbox": [ + 104, + 654, + 507, + 734 + ], + "type": "text", + "content": " on WebArena benchmark and " + }, + { + "bbox": [ + 104, + 654, + 507, + 734 + ], + "type": "inline_equation", + "content": "39.8\\%" + }, + { + "bbox": [ + 104, + 654, + 507, + 734 + ], + "type": "text", + "content": " on real-world websites. 
Notably, weaker agents" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "spans": [ + { + "bbox": [ + 302, + 751, + 309, + 760 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "spans": [ + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": "equipped with skills from more powerful counterparts showed improvements of up to " + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "inline_equation", + "content": "54.3\\%" + }, + { + "bbox": [ + 104, + 82, + 504, + 128 + ], + "type": "text", + "content": ", demonstrating effective knowledge distillation through lightweight, transferable modules. Our findings point to a promising future where web agents can learn to improve themselves without parameter tuning by evolving a collective skill base." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 144, + 212, + 159 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 212, + 159 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 212, + 159 + ], + "type": "text", + "content": "Acknowledgments" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 170, + 504, + 194 + ], + "type": "text", + "content": "We would like to extend our appreciation to colleagues from the OSU NLP group and Anqi Feng for their insightful comments. This work is supported by a gift from Cisco." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 210, + 200, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 210, + 200, + 224 + ], + "spans": [ + { + "bbox": [ + 105, + 210, + 200, + 224 + ], + "type": "text", + "content": "Ethics Statement" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 236, + 506, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 506, + 414 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 506, + 414 + ], + "type": "text", + "content": "Generalist web agents hold the potential to automate routine web tasks, enhance user experiences, and promote web accessibility, safety concerns related to their real-world deployment are also critical. These concerns span privacy issues, such as access to users' personal profiles, and sensitive operations, such as financial transactions or application form submissions. During online evaluation, we noticed the possibility of these web agents generating harmful actions on the web, and we manually validated the safety of all the actions before execution. Exploration on live website may lead to potential safety concerns about trigger harmful actions. In case the agent is being used on a live website, we add safety instructions which guardrail the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users. It is critical for further research to thoroughly assess and mitigate the safety risks associated with web agents, ensuring they are safeguarded against producing and executing harmful actions. The code will also be released solely for research purposes, with the goal of making the web more accessible via language technologies under an OPEN-RAIL license. We are strongly against any potentially harmful use of the data or technology by any party." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 430, + 168, + 442 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 430, + 168, + 442 + ], + "spans": [ + { + "bbox": [ + 105, + 430, + 168, + 442 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 449, + 507, + 733 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 105, + 449, + 506, + 495 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 449, + 506, + 495 + ], + "spans": [ + { + "bbox": [ + 105, + 449, + 506, + 495 + ], + "type": "text", + "content": "Tianle Cai, Xuezhi Wang, Tengyu Ma, Xinyun Chen, and Denny Zhou. Large language models as tool makers. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=qV83K9d5WB." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 503, + 507, + 549 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 503, + 507, + 549 + ], + "spans": [ + { + "bbox": [ + 105, + 503, + 507, + 549 + ], + "type": "text", + "content": "Hyungjoo Chae, Namyoung Kim, Kai Tzu iunn Ong, Minju Gwak, Gwanwoo Song, Ji-hoon Kim, Sunghwan Kim, Dongha Lee, and Jinyoung Yeo. Web agents with world models: Learning and leveraging environment dynamics in web navigation. ArXiv, abs/2410.13232, 2024. URL https://api-semanticscholar.org/CorpusID:273404026." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 557, + 507, + 604 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 557, + 507, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 557, + 507, + 604 + ], + "type": "text", + "content": "Zixiang Chen, Yihe Deng, Huizhuo Yuan, Kaixuan Ji, and Quanquan Gu. Self-play finetuning converts weak language models to strong language models. 
In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=04cHTxW9BS." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 611, + 507, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 507, + 689 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 507, + 689 + ], + "type": "text", + "content": "Kanzhi Cheng, Qiushi Sun, Yougang Chu, Fangzhi Xu, Yantao Li, Jianbing Zhang, and Zhiyong Wu. Seeclick: Harnessing GUI grounding for advanced visual GUI agents. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 9313-9332. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.ACL-LONG.505. URL https://doi.org/10.18653/v1/2024.acl-long.505." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 698, + 507, + 733 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 698, + 507, + 733 + ], + "spans": [ + { + "bbox": [ + 105, + 698, + 507, + 733 + ], + "type": "text", + "content": "Xiang Deng, Yu Gu, Boyuan Zheng, Shijie Chen, Samual Stevens, Boshi Wang, Huan Sun, and Yu Su. Mind2web: Towards a generalist agent for the web. 
In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 81, + 507, + 734 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 115, + 81, + 507, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 81, + 507, + 127 + ], + "spans": [ + { + "bbox": [ + 115, + 81, + 507, + 127 + ], + "type": "text", + "content": "(eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitnet/5950bf290a1570ea401bf98882128160-Abstract-Datasets_and_Benchmarks.html." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 133, + 507, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 133, + 507, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 133, + 507, + 212 + ], + "type": "text", + "content": "Yuqing Du, Olivia Watkins, Zihan Wang, Cedric Colas, Trevor Darrell, Pieter Abbeel, Abhishek Gupta, and Jacob Andreas. Guiding pretraining in reinforcement learning with large language models. In Andreas Krause, Emma Brunskill, Kyunghyun Cho, Barbara Engelhardt, Sivan Sabato, and Jonathan Scarlett (eds.), International Conference on Machine Learning, ICML 2023, 23-29 July 2023, Honolulu, Hawaii, USA, volume 202 of Proceedings of Machine Learning Research, pp. 8657-8677. PMLR, 2023. 
URL https://proceedings.mlr.press/v202/du23f.html." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 218, + 507, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 218, + 507, + 275 + ], + "spans": [ + { + "bbox": [ + 105, + 218, + 507, + 275 + ], + "type": "text", + "content": "Hiroki Furuta, Kuang-Huei Lee, Ofir Nachum, Yutaka Matsuo, Aleksandra Faust, Shixiang Shane Gu, and Izzeddin Gur. Multimodal web navigation with instructionfinetuned foundation models. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024. URL https://openreview.net/forum?id=efFmBWioSc." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 281, + 506, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 281, + 506, + 316 + ], + "spans": [ + { + "bbox": [ + 105, + 281, + 506, + 316 + ], + "type": "text", + "content": "Boyu Gou, Ruohan Wang, Boyuan Zheng, Yanan Xie, Cheng Chang, Yiheng Shu, Huan Sun, and Yu Su. Navigating the digital world as humans do: Universal visual grounding for gui agents. arXiv preprint arXiv:2410.05243, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 322, + 507, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 322, + 507, + 399 + ], + "spans": [ + { + "bbox": [ + 105, + 322, + 507, + 399 + ], + "type": "text", + "content": "Yu Gu, Yiheng Shu, Hao Yu, Xiao Liu, Yuxiao Dong, Jie Tang, Jayanth Srinivasa, Hugo Latapie, and Yu Su. Middleware for llms: Tools are instrumental for language agents in complex environments. In Yaser Al-Onaizan, Mohit Bansal, and Yun-Nung Chen (eds.), Proceedings of the 2024 Conference on Empirical Methods in Natural Language Processing, EMNLP 2024, Miami, FL, USA, November 12-16, 2024, pp. 7646-7663. Association for Computational Linguistics, 2024a. URL https://aclanthology.org/2024.emnlp-main.436." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 406, + 507, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 406, + 507, + 452 + ], + "spans": [ + { + "bbox": [ + 105, + 406, + 507, + 452 + ], + "type": "text", + "content": "Yu Gu, Kai Zhang, Yuting Ning, Boyuan Zheng, Boyu Gou, Tianci Xue, Cheng Chang, Sanjari Srivastava, Yanan Xie, Peng Qi, Huan Sun, and Yu Su. Is your llm secretly a world model of the internet? model-based planning for web agents, 2024b. URL https://arxiv.org/abs/2411.06559." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 457, + 507, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 457, + 507, + 536 + ], + "spans": [ + { + "bbox": [ + 105, + 457, + 507, + 536 + ], + "type": "text", + "content": "Hongliang He, Wenlin Yao, Kaixin Ma, Wenhao Yu, Yong Dai, Hongming Zhang, Zhenzhong Lan, and Dong Yu. Webvoyager: Building an end-to-end web agent with large multimodal models. In Lun-Wei Ku, Andre Martins, and Vivek Srikumar (eds.), Proceedings of the 62nd Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2024, Bangkok, Thailand, August 11-16, 2024, pp. 6864-6890. Association for Computational Linguistics, 2024. doi: 10.18653/V1/2024.ACL-LONG.371. URL https://doi.org/10.18653/v1/2024.acl-long.371." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 542, + 507, + 588 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 542, + 507, + 588 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 507, + 588 + ], + "type": "text", + "content": "Wenyi Hong, Weihan Wang, Qingsong Lv, Jiazheng Xu, Wenmeng Yu, Junhui Ji, Yan Wang, Zihan Wang, Yuxiao Dong, Ming Ding, and Jie Tang. Cogagent: A visual language model for gui agents. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pp. 14281-14290, 2024." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 594, + 507, + 662 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 507, + 662 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 507, + 662 + ], + "type": "text", + "content": "Jiaxin Huang, Shixiang Gu, Le Hou, Yuexin Wu, Xuezhi Wang, Hongkun Yu, and Jiawei Han. Large language models can self-improve. In Houda Bouamor, Juan Pino, and Kalika Bali (eds.), Proceedings of the 2023 Conference on Empirical Methods in Natural Language Processing, EMNLP 2023, Singapore, December 6-10, 2023, pp. 1051-1068. Association for Computational Linguistics, 2023. doi: 10.18653/V1/2023.EMNLP-MAIN.67. URL https://doi.org/10.18653/v1/2023.emnlp-main.67." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 668, + 507, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 668, + 507, + 702 + ], + "spans": [ + { + "bbox": [ + 105, + 668, + 507, + 702 + ], + "type": "text", + "content": "Jing Yu Koh, Stephen McAleer, Daniel Fried, and Ruslan Salakhutdinov. Tree search for language model agents. CoRR, abs/2407.01476, 2024. doi: 10.48550/ARXIV.2407.01476. URL https://doi.org/10.48550/arXiv.2407.01476." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 708, + 507, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 507, + 734 + ], + "type": "text", + "content": "Hanyu Lai, Xiao Liu, Iat Long Iong, Shuntian Yao, Yuxuan Chen, Pengbo Shen, Hao Yu, Hanchen Zhang, Xiaohan Zhang, Yuxiao Dong, et al. 
Autowebglm: A large language" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 506, + 731 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 115, + 82, + 504, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 504, + 106 + ], + "type": "text", + "content": "model-based web navigating agent. In Proceedings of the 30th ACM SIGKDD Conference on Knowledge Discovery and Data Mining, pp. 5295-5306, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 110, + 506, + 199 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 110, + 506, + 199 + ], + "spans": [ + { + "bbox": [ + 107, + 110, + 506, + 199 + ], + "type": "text", + "content": "Wei Li, William E. Bishop, Alice Li, Christopher Rawles, Folawiyo Campbell-Ajala, Divya Tyamagundlu, and Oriana Riva. On the effects of data scale on UI control agents. In Amir Globersons, Lester Mackey, Danielle Belgrave, Angela Fan, Ulrich Paquet, Jakub M. Tomczak, and Cheng Zhang (eds.), Advances in Neural Information Processing Systems 38: Annual Conference on Neural Information Processing Systems 2024, NeurIPS 2024, Vancouver, BC, Canada, December 10 - 15, 2024, 2024. URL http://papers.nips.cc/paper_files/paper/2024/bitstream/a79f3ef3b445fd4659f44648f7ea8ffd-Abstract-Datasets_and_Benchmarks_Track.html." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 205, + 506, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 205, + 506, + 282 + ], + "spans": [ + { + "bbox": [ + 106, + 205, + 506, + 282 + ], + "type": "text", + "content": "Pan Lu, Baolin Peng, Hao Cheng, Michel Galley, Kai-Wei Chang, Ying Nian Wu, Song-Chun Zhu, and Jianfeng Gao. Chameleon: Plug-and-play compositional reasoning with large language models. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/black/871ed095b734818cfba48db6aab25a62-Abstract-Conference.html." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 289, + 506, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 289, + 506, + 388 + ], + "spans": [ + { + "bbox": [ + 106, + 289, + 506, + 388 + ], + "type": "text", + "content": "Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegrefe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bit/91edff07232fb1b55a505a9e9f6c0ff3-Abstract-Conference.html." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 394, + 506, + 438 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 394, + 506, + 438 + ], + "spans": [ + { + "bbox": [ + 106, + 394, + 506, + 438 + ], + "type": "text", + "content": "Shikhar Murty, Dzmitry Bahdanau, and Christopher D. Manning. Nnetscape navigator: Complex demonstrations for web agents without a demonstrator. CoRR, abs/2410.02907, 2024a. doi: 10.48550/ARXIV.2410.02907. URL https://doi.org/10.48550/arXiv.2410.02907." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 445, + 506, + 489 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 445, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 445, + 506, + 489 + ], + "type": "text", + "content": "Shikhar Murty, Christopher D. Manning, Peter Shaw, Mandar Joshi, and Kenton Lee. BAGEL: bootstrapping agents by guiding exploration with language. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=VsvfSMI5bs." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 495, + 506, + 540 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 495, + 506, + 540 + ], + "spans": [ + { + "bbox": [ + 106, + 495, + 506, + 540 + ], + "type": "text", + "content": "Vardaan Pahuja, Yadong Lu, Corby Rosset, Boyu Gou, Arindam Mitra, Spencer Whitehead, Yu Su, and Ahmed Awadallah. Explorer: Scaling exploration-driven web trajectory synthesis for multimodal web agents. 2025. URL https://api-semanticscholar.org/ CorpusID:276408442." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 546, + 506, + 580 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 546, + 506, + 580 + ], + "spans": [ + { + "bbox": [ + 106, + 546, + 506, + 580 + ], + "type": "text", + "content": "Jiayi Pan, Yichi Zhang, Nicholas Tomlin, Yifei Zhou, Sergey Levine, and Alane Suhr. Autonomous evaluation and refinement of digital agents. CoRR, abs/2404.06474, 2024. doi: 10.48550/ARXIV.2404.06474. URL https://doi.org/10.48550/arXiv.2404.06474." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 586, + 506, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 586, + 506, + 630 + ], + "spans": [ + { + "bbox": [ + 106, + 586, + 506, + 630 + ], + "type": "text", + "content": "Ajay Patel, Markus Hofmarcher, Claudiu Leoveanu-Condrei, Marius-Constantin Dinu, Chris Callison-Burch, and Sepp Hochreiter. Large language models can self-improve at web agent tasks. CoRR, abs/2405.20309, 2024. doi: 10.48550/ARXIV.2405.20309. URL https://doi.org/10.48550/arXiv.2405.20309." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 636, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 636, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 106, + 636, + 504, + 681 + ], + "type": "text", + "content": "Pranav Putta, Edmund Mills, Naman Garg, Sumeet Motwani, Chelsea Finn, Divyansh Garg, and Rafael Rafailov. Agent Q: advanced reasoning and learning for autonomous AI agents. CoRR, abs/2408.07199, 2024. doi: 10.48550/ARXIV.2408.07199. URL https://doi.org/10.48550/arXiv.2408.07199." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 687, + 504, + 731 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 687, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 687, + 504, + 731 + ], + "type": "text", + "content": "Cheng Qian, Chi Han, Yi Fung, Yujia Qin, Zhiyuan Liu, and Heng Ji. 
CREATOR: Tool creation for disentangling abstract and concrete reasoning of large language models. In The 2023 Conference on Empirical Methods in Natural Language Processing, 2023. URL https://openreview.net/forum?id=aCHq10rQiH." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 508, + 734 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 106, + 81, + 507, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 507, + 128 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 507, + 128 + ], + "type": "text", + "content": "Gabriel Sarch, Lawrence Jang, Michael J. Tarr, William W. Cohen, Kenneth Marino, and Katerina Fragkiadaki. Ical: Continual learning of multimodal agents by transforming trajectories into actionable insights. ArXiv, abs/2406.14596, 2024. URL https://api.sementicscholar.org/CorpusID:274466462." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 133, + 508, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 133, + 508, + 224 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 508, + 224 + ], + "type": "text", + "content": "Timo Schick, Jane Dwivedi-Yu, Roberto Dessi, Roberta Raileanu, Maria Lomeli, Eric Hambro, Luke Zettlemoyer, Nicola Cancedda, and Thomas Scialom. Toolformer: Language models can teach themselves to use tools. 
In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/d842425e4bf79ba039352da0f658a906-Abstract-Conference.html." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 228, + 508, + 319 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 228, + 508, + 319 + ], + "spans": [ + { + "bbox": [ + 106, + 228, + 508, + 319 + ], + "type": "text", + "content": "Peter Shaw, Mandar Joshi, James Cohan, Jonathan Berant, Panupong Pasupat, Hexiang Hu, Urvashi Khandelwal, Kenton Lee, and Kristina Toutanova. From pixels to UI actions: Learning to follow instructions via graphical user interfaces. In Alice Oh, Tristan Naumann, Amir Globerson, Kate Saenko, Moritz Hardt, and Sergey Levine (eds.), Advances in Neural Information Processing Systems 36: Annual Conference on Neural Information Processing Systems 2023, NeurIPS 2023, New Orleans, LA, USA, December 10 - 16, 2023, 2023. URL http://papers.nips.cc/paper_files/paper/2023/bitstream/6c52a8a4fadc9129c6e1d1745f2dfd0f-Abstract-Conference.html." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 324, + 506, + 370 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 324, + 506, + 370 + ], + "spans": [ + { + "bbox": [ + 105, + 324, + 506, + 370 + ], + "type": "text", + "content": "Junhong Shen, Atishay Jain, Zedian Xiao, Ishan Amlekar, Mouad Hadji, Aaron Podolny, and Ameet Talwalkar. Scribeagent: Towards specialized web agents using production-scale workflow data. ArXiv, abs/2411.15004, 2024. URL https://api(semanticscholar.org/ CorpusID:274192657." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 376, + 507, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 376, + 507, + 488 + ], + "spans": [ + { + "bbox": [ + 106, + 376, + 507, + 488 + ], + "type": "text", + "content": "Avi Singh, John D. Co-Reyes, Rishabh Agarwal, Ankesh Anand, Piyush Patil, Xavier Garcia, Peter J. Liu, James Harrison, Jaehoon Lee, Kelvin Xu, Aaron T. Parisi, Abhishek Kumar, Alexander A. Alemi, Alex Rizkowsky, Azade Nova, Ben Adlam, Bernd Bohnet, Gamaleldin Fathy Elsayed, Hanie Sedghi, Igor Mordatch, Isabelle Simpson, Izzeddin Gur, Jasper Snoek, Jeffrey Pennington, Jiri Hron, Kathleen Kenealy, Kevin Swersky, Kshitteej Mahajan, Laura Culp, Lechao Xiao, Maxwell L. Bileschi, Noah Constant, Roman Novak, Rosanne Liu, Tris Warkentin, Yundi Qian, Yamini Bansal, Ethan Dyer, Behnam Neyshabur, Jascha Sohl-Dickstein, and Noah Fiedel. Beyond human data: Scaling self-training for problem-solving with language models. Trans. Mach. Learn. Res., 2024, 2024. URL https://openreview.net/forum?id=1NAyUngGFK." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 494, + 505, + 517 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 494, + 505, + 517 + ], + "spans": [ + { + "bbox": [ + 105, + 494, + 505, + 517 + ], + "type": "text", + "content": "Paloma Sodhi, S. R. K. Branavan, Yoav Artzi, and Ryan McDonald. Step: Stacked llm policies for web actions, 2024. URL https://arxiv.org/abs/2310.03720." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 523, + 507, + 559 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 523, + 507, + 559 + ], + "spans": [ + { + "bbox": [ + 106, + 523, + 507, + 559 + ], + "type": "text", + "content": "Yifan Song, Da Yin, Xiang Yue, Jie Huang, Sujian Li, and Bill Yuchen Lin. Trial and error: Exploration-based trajectory optimization for LLM agents. CoRR, abs/2403.02502, 2024a. doi: 10.48550/ARXIV.2403.02502. 
URL https://doi.org/10.48550/arXiv.2403.02502." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 564, + 507, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 564, + 507, + 599 + ], + "spans": [ + { + "bbox": [ + 106, + 564, + 507, + 599 + ], + "type": "text", + "content": "Yueqi Song, Frank F. Xu, Shuyan Zhou, and Graham Neubig. Beyond browsing: Api-based web agents. ArXiv, abs/2410.16464, 2024b. URL https://api-semanticscholar.org/ CorpusID:273507298." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 605, + 507, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 605, + 507, + 650 + ], + "spans": [ + { + "bbox": [ + 106, + 605, + 507, + 650 + ], + "type": "text", + "content": "Hongjin Su, Ruoxi Sun, Jinsung Yoon, Pengcheng Yin, Tao Yu, and Sercan Ö. Arik. Learn-by-interact: A data-centric framework for self-adaptive agents in realistic environments. CoRR, abs/2501.10893, 2025. doi: 10.48550/ARXIV.2501.10893. URL https://doi.org/10.48550/arXiv.2501.10893." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 656, + 507, + 702 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 656, + 507, + 702 + ], + "spans": [ + { + "bbox": [ + 106, + 656, + 507, + 702 + ], + "type": "text", + "content": "Fahim Tajwar, Yiding Jiang, Abitha Thankaraj, Sumaita Sadia Rahman, J. Zico Kolter, Jeff Schneider, and Ruslan Salakhutdinov. Training a generally curious agent. CoRR, abs/2502.17543, 2025. doi: 10.48550/ARXIV.2502.17543. URL https://doi.org/10.48550/arXiv.2502.17543." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 708, + 507, + 734 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 708, + 507, + 734 + ], + "spans": [ + { + "bbox": [ + 105, + 708, + 507, + 734 + ], + "type": "text", + "content": "Guanzhi Wang, Yuqi Xie, Yunfan Jiang, Ajay Mandlekar, Chaowei Xiao, Yuke Zhu, Linxi (Jim) Fan, and Anima Anandkumar. 
Voyager: An open-ended embodied agent" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 82, + 506, + 732 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 115, + 82, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 82, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 115, + 82, + 505, + 105 + ], + "type": "text", + "content": "with large language models. Trans. Mach. Learn. Res., 2024, 2023a. URL https://api.sementicscholar.org/CorpusID:258887849." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 107, + 113, + 506, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 113, + 506, + 168 + ], + "spans": [ + { + "bbox": [ + 107, + 113, + 506, + 168 + ], + "type": "text", + "content": "Haiming Wang, Huajian Xin, Chuanyang Zheng, Zhengying Liu, Qingxing Cao, Yinya Huang, Jing Xiong, Han Shi, Enze Xie, Jian Yin, Zhenguo Li, and Xiaodan Liang. LEGO: Neural theorem proving with growing libraries. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=3f5PALef5B." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 177, + 506, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 177, + 506, + 222 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 506, + 222 + ], + "type": "text", + "content": "Xingyao Wang, Yangyi Chen, Lifan Yuan, Yizhe Zhang, Yunzhu Li, Hao Peng, and Heng Ji. Executable code actions elicit better LLM agents. 
In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=jJ9BoXAfFa." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 229, + 506, + 308 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 229, + 506, + 308 + ], + "spans": [ + { + "bbox": [ + 106, + 229, + 506, + 308 + ], + "type": "text", + "content": "Yizhong Wang, Yeganeh Kordi, Swaroop Mishra, Alisa Liu, Noah A. Smith, Daniel Khashabi, and Hannaneh Hajishirzi. Self-instruct: Aligning language models with self-generated instructions. In Anna Rogers, Jordan L. Boyd-Graber, and Naoaki Okazaki (eds.), Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), ACL 2023, Toronto, Canada, July 9-14, 2023, pp. 13484-13508. Association for Computational Linguistics, 2023b. doi: 10.18653/V1/2023.ACL-LONG.754. URL https://doi.org/10.18653/v1/2023.acl-long.754." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 316, + 504, + 350 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 316, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 106, + 316, + 504, + 350 + ], + "type": "text", + "content": "Zhiruo Wang, Zhoujun Cheng, Hao Zhu, Daniel Fried, and Graham Neubig. What are tools anyway? a survey from the language model perspective. In First Conference on Language Modeling, 2024c. URL https://openreview.net/forum?id=Xh1B90iBSR." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 357, + 504, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 357, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 357, + 504, + 392 + ], + "type": "text", + "content": "Zhiruo Wang, Graham Neubig, and Daniel Fried. TroVE: Inducing verifiable and efficient toolboxes for solving programmatic tasks. 
In *Forty-first International Conference on Machine Learning*, 2024d. URL https://openreview.net/forum?id=DCNCwaMJjI." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 399, + 504, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 399, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 106, + 399, + 504, + 422 + ], + "type": "text", + "content": "Zora Zhiruo Wang, Jiayuan Mao, Daniel Fried, and Graham Neubig. Agent workflow memory. arXiv preprint arXiv:2409.07429, 2024e." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 430, + 504, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 430, + 504, + 475 + ], + "spans": [ + { + "bbox": [ + 106, + 430, + 504, + 475 + ], + "type": "text", + "content": "Zhiyong Wu, Chengcheng Han, Zichen Ding, Zhenmin Weng, Zhoumianze Liu, Shunyu Yao, Tao Yu, and Lingpeng Kong. Os-copilot: Towards generalist computer agents with self-improvement. CoRR, abs/2402.07456, 2024. doi: 10.48550/ARXIV.2402.07456. URL https://doi.org/10.48550/arXiv.2402.07456." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 483, + 506, + 551 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 483, + 506, + 551 + ], + "spans": [ + { + "bbox": [ + 106, + 483, + 506, + 551 + ], + "type": "text", + "content": "Tianbao Xie, Danyang Zhang, Jixuan Chen, Xiaochuan Li, Siheng Zhao, Ruisheng Cao, Toh Jing Hua, Zhoujun Cheng, Dongchan Shin, Fangyu Lei, Yitao Liu, Yiheng Xu, Shuyan Zhou, Silvio Savarese, Caiming Xiong, Victor Zhong, and Tao Yu. Osworld: Benchmarking multimodal agents for open-ended tasks in real computer environments. CoRR, abs/2404.07972, 2024. doi: 10.48550/ARXIV.2404.07972. URL https://doi.org/10.48550/arXiv.2404.07972." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 559, + 506, + 594 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 559, + 506, + 594 + ], + "spans": [ + { + "bbox": [ + 106, + 559, + 506, + 594 + ], + "type": "text", + "content": "Tianci Xue, Weijian Qi, Tianneng Shi, Chan Hee Song, Boyu Gou, Dawn Song, Huan Sun, and Yu Su. An illusion of progress? assessing the current state of web agents. OSU NLP Blog, Mar 2025. URL https://tinyurl.com/online-mind2web-blog." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 601, + 506, + 678 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 601, + 506, + 678 + ], + "spans": [ + { + "bbox": [ + 106, + 601, + 506, + 678 + ], + "type": "text", + "content": "Shunyu Yao, Howard Chen, John Yang, and Karthik Narasimhan. Webshop: Towards scalable real-world web interaction with grounded language agents. In Sanmi Koyejo, S. Mohamed, A. Agarwal, Danielle Belgrave, K. Cho, and A. Oh (eds.), Advances in Neural Information Processing Systems 35: Annual Conference on Neural Information Processing Systems 2022, NeurIPS 2022, New Orleans, LA, USA, November 28 - December 9, 2022, 2022. URL http://papers.nips.cc/paper_files/paper/2022/bit/82ad13ec01f9fe44c01cb91814fd7b8c-Abstract-Conference.htm1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 686, + 506, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 686, + 506, + 732 + ], + "spans": [ + { + "bbox": [ + 106, + 686, + 506, + 732 + ], + "type": "text", + "content": "Shunyu Yao, Jeffrey Zhao, Dian Yu, Nan Du, Izhak Shafran, Karthik R. Narasimhan, and Yuan Cao. React: Synergizing reasoning and acting in language models. In The Eleventh International Conference on Learning Representations, ICLR 2023, Kigali, Rwanda, May 1-5, 2023. OpenReview.net, 2023. URL https://openreview.net/forum?id=WE_vluYUL-X." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 81, + 506, + 625 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 106, + 81, + 505, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 81, + 505, + 127 + ], + "spans": [ + { + "bbox": [ + 106, + 81, + 505, + 127 + ], + "type": "text", + "content": "Ori Yoran, Samuel Joseph Amouyal, Chaitanya Malaviya, Ben Boin, Ofir Press, and Jonathan Berant. Assistantbench: Can web agents solve realistic and time-consuming tasks? In Conference on Empirical Methods in Natural Language Processing, 2024. URL https://api_semanticscholar.org/CorpusID:271328691." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 133, + 506, + 212 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 133, + 506, + 212 + ], + "spans": [ + { + "bbox": [ + 106, + 133, + 506, + 212 + ], + "type": "text", + "content": "Keen You, Haotian Zhang, Eldon Schoop, Floris Weers, Amanda Swearngin, Jeffrey Nichols, Yinfei Yang, and Zhe Gan. Ferret-ui: Grounded mobile UI understanding with multimodal llms. In Ales Leonardis, Elisa Ricci, Stefan Roth, Olga Russakovsky, Torsten Sattler, and Gül Varol (eds.), Computer Vision - ECCV 2024 - 18th European Conference, Milan, Italy, September 29-October 4, 2024, Proceedings, Part LXIV, volume 15122 of Lecture Notes in Computer Science, pp. 240-255. Springer, 2024. doi: 10.1007/978-3-031-73039-9\\_.14. URL https://doi.org/10.1007/978-3-031-73039-9_14." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 218, + 506, + 262 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 218, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 106, + 218, + 506, + 262 + ], + "type": "text", + "content": "Lifan Yuan, Yangyi Chen, Xingyao Wang, Yi Fung, Hao Peng, and Heng Ji. CRAFT: Customizing LLMs by creating and retrieving from specialized toolsets. In The Twelfth International Conference on Learning Representations, 2024a. URL https://openreview.net/forum?id=G0vdDSt9XM." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 270, + 506, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 270, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 106, + 270, + 506, + 315 + ], + "type": "text", + "content": "Weizhe Yuan, Richard Yuanzhe Pang, Kyunghyun Cho, Xian Li, Sainbayar Sukhbaatar, Jing Xu, and Jason Weston. Self-rewarding language models. In *Forty-first International Conference on Machine Learning*, ICML 2024, Vienna, Austria, July 21-27, 2024. OpenReview.net, 2024b. URL https://openreview.net/forum?id=ØNphYCmgua." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 322, + 506, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 322, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 506, + 367 + ], + "type": "text", + "content": "Yao Zhang, Zijian Ma, Yunpu Ma, Zhen Han, Yu Wu, and Volker Tresp. Webpilot: A versatile and autonomous multi-agent system for web task execution with strategic exploration. CoRR, abs/2408.15978, 2024. doi: 10.48550/ARXIV.2408.15978. URL https://doi.org/10.48550/arXiv.2408.15978." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 373, + 506, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 373, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 106, + 373, + 506, + 407 + ], + "type": "text", + "content": "Boyuan Zheng, Boyu Gou, Jihyung Kil, Huan Sun, and Yu Su. Gpt-4v(isdiction) is a generalist web agent, if grounded. In *Forty-first International Conference on Machine Learning*, 2024. URL https://openreview.net/forum?id=piecKJ2D1B." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 415, + 506, + 459 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 415, + 506, + 459 + ], + "spans": [ + { + "bbox": [ + 106, + 415, + 506, + 459 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, et al. Judging llm-as-a-judge with mt-bench and chatbot arena. Advances in Neural Information Processing Systems, 36:46595-46623, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 466, + 506, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 466, + 506, + 523 + ], + "spans": [ + { + "bbox": [ + 106, + 466, + 506, + 523 + ], + "type": "text", + "content": "Shuyan Zhou, Frank F. Xu, Hao Zhu, Xuhui Zhou, Robert Lo, Abishek Sridhar, Xianyi Cheng, Tianyue Ou, Yonatan Bisk, Daniel Fried, Uri Alon, and Graham Neubig. Webarena: A realistic web environment for building autonomous agents. In The Twelfth International Conference on Learning Representations, ICLR 2024, Vienna, Austria, May 7-11, 2024. OpenReview.net, 2024a. URL https://openreview.net/forum?id=oKn9c6ytLx." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 529, + 506, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 529, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 106, + 529, + 506, + 574 + ], + "type": "text", + "content": "Yifei Zhou, Qianlan Yang, Kaixiang Lin, Min Bai, Xiong Zhou, Yu-Xiong Wang, Sergey Levine, and Li Erran Li. Proposer-agent-evaluator(pae): Autonomous skill discovery for foundation model internet agents. CoRR, abs/2412.13194, 2024b. doi: 10.48550/ARXIV.2412.13194. URL https://doi.org/10.48550/arXiv.2412.13194." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 581, + 506, + 625 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 581, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 106, + 581, + 506, + 625 + ], + "type": "text", + "content": "Mingchen Zhuge, Changsheng Zhao, Dylan Ashley, Wenyi Wang, Dmitrii Khizbullin, Yunyang Xiong, Zechun Liu, Ernie Chang, Raghuraman Krishnamoorthi, Yuandong Tian, et al. Agent-as-a-judge: Evaluate agents with agents. arXiv preprint arXiv:2410.10934, 2024." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 310, + 760 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 82, + 186, + 94 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 82, + 186, + 94 + ], + "spans": [ + { + "bbox": [ + 105, + 82, + 186, + 94 + ], + "type": "text", + "content": "Table of Content:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 132, + 99, + 241, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 99, + 241, + 111 + ], + "spans": [ + { + "bbox": [ + 132, + 99, + 241, + 111 + ], + "type": "text", + "content": "- Appendix A: Prompts" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 151, + 113, + 442, + 202 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 151, + 113, + 292, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 113, + 292, + 125 + ], + "spans": [ + { + "bbox": [ + 151, + 113, + 292, + 125 + ], + "type": "text", + "content": "- Appendix A.1: Skill Proposal" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 151, + 126, + 442, + 137 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 126, + 442, + 137 + ], + "spans": [ + { + "bbox": [ + 151, + 126, + 442, + 137 + ], + "type": "text", + "content": "- Appendix A.2: Code Generation Agent Context Representation" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 139, + 406, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 139, + 406, + 150 + ], + "spans": [ + { + "bbox": [ + 151, + 139, + 406, + 150 + ], + "type": "text", + 
"content": "- Appendix A.3: Code Generation Agent Error Detection" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 151, + 152, + 304, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 152, + 304, + 163 + ], + "spans": [ + { + "bbox": [ + 151, + 152, + 304, + 163 + ], + "type": "text", + "content": "- Appendix A.4: Action Synthesis" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 151, + 164, + 309, + 176 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 164, + 309, + 176 + ], + "spans": [ + { + "bbox": [ + 151, + 164, + 309, + 176 + ], + "type": "text", + "content": "- Appendix A.5: Success Checking" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 151, + 178, + 402, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 178, + 402, + 190 + ], + "spans": [ + { + "bbox": [ + 151, + 178, + 402, + 190 + ], + "type": "text", + "content": "- Appendix A.6: Persisting Attempt to Knowledge Base" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 151, + 190, + 310, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 190, + 310, + 202 + ], + "spans": [ + { + "bbox": [ + 151, + 190, + 310, + 202 + ], + "type": "text", + "content": "- Appendix A.7: Scraping Prompts" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 204, + 315, + 237 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 132, + 204, + 301, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 204, + 301, + 214 + ], + "spans": [ + { + "bbox": [ + 132, + 204, + 301, + 214 + ], + "type": "text", + "content": "- Appendix B: WebArena Benchmark" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 215, + 315, + 225 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 215, + 315, + 225 + ], + "spans": [ + { + "bbox": [ + 132, + 215, + 315, + 225 + ], + "type": "text", + 
"content": "- Appendix C: Real World Website Tasks" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 132, + 226, + 266, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 226, + 266, + 237 + ], + "spans": [ + { + "bbox": [ + 132, + 226, + 266, + 237 + ], + "type": "text", + "content": "- Appendix D: Example APIs" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 151, + 239, + 310, + 276 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 151, + 239, + 292, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 239, + 292, + 250 + ], + "spans": [ + { + "bbox": [ + 151, + 239, + 292, + 250 + ], + "type": "text", + "content": "- Appendix D.1: Success Cases" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 151, + 251, + 290, + 262 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 251, + 290, + 262 + ], + "spans": [ + { + "bbox": [ + 151, + 251, + 290, + 262 + ], + "type": "text", + "content": "- Appendix D.2: Failure Cases" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 151, + 264, + 310, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 264, + 310, + 276 + ], + "spans": [ + { + "bbox": [ + 151, + 264, + 310, + 276 + ], + "type": "text", + "content": "- Appendix D.3: Composition API." 
+ } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 81, + 176, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 81, + 176, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 81, + 176, + 95 + ], + "type": "text", + "content": "A Prompts" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 106, + 199, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 106, + 199, + 121 + ], + "spans": [ + { + "bbox": [ + 105, + 106, + 199, + 121 + ], + "type": "text", + "content": "A.1 Skill Proposal" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "spans": [ + { + "bbox": [ + 104, + 128, + 506, + 163 + ], + "type": "text", + "content": "In case the agent is being used on a live website, we add safety instructions which bias the agent away from actions that could potentially have side effects or that would otherwise be disadvantageous to explore, such as creating accounts or interacting with human users." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 171, + 244, + 184 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 171, + 244, + 184 + ], + "spans": [ + { + "bbox": [ + 121, + 171, + 244, + 184 + ], + "type": "text", + "content": "Proposing Tasks to Explore" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 121, + 193, + 491, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 193, + 491, + 206 + ], + "spans": [ + { + "bbox": [ + 121, + 193, + 491, + 206 + ], + "type": "text", + "content": "System: You propose tasks that would make good 'tools' for external users of a website. User:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 209, + 463, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 209, + 463, + 242 + ], + "spans": [ + { + "bbox": [ + 121, + 209, + 463, + 242 + ], + "type": "text", + "content": "You are a 'web agent' who is learning how to use a website. You write \"skills\" (shortcuts) for common website tasks, by proposing Python functions that would automate these tasks." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 249, + 375, + 291 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 249, + 375, + 291 + ], + "spans": [ + { + "bbox": [ + 121, + 249, + 375, + 291 + ], + "type": "text", + "content": "You have already proposed the following skills: \n \n{procedural knowledge} \n" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 299, + 462, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 299, + 462, + 351 + ], + "spans": [ + { + "bbox": [ + 121, + 299, + 462, + 351 + ], + "type": "text", + "content": "You have built up the following knowledge about the website (in addition to the current screenshot): {semantic_knowledge} " + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 359, + 471, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 359, + 471, + 430 + ], + "spans": [ + { + "bbox": [ + 121, + 359, + 471, + 430 + ], + "type": "text", + "content": "Now please come up with something new to learn how to do on this website. The website is structured according to the following accessibility tree hierarchy: {ax_tree} " + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 121, + 438, + 468, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 438, + 468, + 480 + ], + "spans": [ + { + "bbox": [ + 121, + 438, + 468, + 480 + ], + "type": "text", + "content": "Do not interact with the Advanced Reporting tab if you are using \nMagenta. \nDo not interact with login/login/user accounts on any site. 
\nIf you're on OpenStreetMap, don't interact with community featur" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 121, + 488, + 485, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 488, + 485, + 580 + ], + "spans": [ + { + "bbox": [ + 121, + 488, + 485, + 580 + ], + "type": "text", + "content": "Write a list of useful skills/shortcuts that you would want to have built into a website as Python functions. Write the name in natural language format. Do not use " + }, + { + "bbox": [ + 121, + 488, + 485, + 580 + ], + "type": "inline_equation", + "content": "*\\_id" + }, + { + "bbox": [ + 121, + 488, + 485, + 580 + ], + "type": "text", + "content": " as a parameter in your skill. Again, your goal is to generate functions that would be useful \"shortcuts\" for users of the website, so you should prioritize generating skills that compress a couple interactions into a single function call. Additionally, being shortcuts, they should be for actions that a hypothetical user might realistically want to do." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 121, + 588, + 203, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 588, + 203, + 598 + ], + "spans": [ + { + "bbox": [ + 121, + 588, + 203, + 598 + ], + "type": "text", + "content": "Then, estimate:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 123, + 599, + 487, + 649 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 123, + 599, + 473, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 599, + 473, + 618 + ], + "spans": [ + { + "bbox": [ + 123, + 599, + 473, + 618 + ], + "type": "text", + "content": "(1) how useful they are (5 being difficult and frequency, 1 being trivial or uncommon)," + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 123, + 619, + 487, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 619, + 487, + 649 + ], + "spans": [ + { + "bbox": [ + 123, + 619, + 487, + 649 + ], + "type": "text", + "content": "(2) the expected number of clicking/typing actions required to complete the skill. (calculate this by writing the list of steps and counting AFTERWARDs)" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 122, + 658, + 488, + 688 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 658, + 488, + 688 + ], + "spans": [ + { + "bbox": [ + 122, + 658, + 488, + 688 + ], + "type": "text", + "content": "Prefer to generate skills that are creating, modifying, or filtering/querying data on the website, as these tend to be more useful." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 121, + 689, + 417, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 689, + 417, + 700 + ], + "spans": [ + { + "bbox": [ + 121, + 689, + 417, + 700 + ], + "type": "text", + "content": "Do not generate skills simply to perform single clicks." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 122, + 708, + 236, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 708, + 236, + 720 + ], + "spans": [ + { + "bbox": [ + 122, + 708, + 236, + 720 + ], + "type": "text", + "content": "{safetyInstructions}" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 99, + 478, + 121 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 99, + 478, + 121 + ], + "spans": [ + { + "bbox": [ + 121, + 99, + 478, + 121 + ], + "type": "text", + "content": "Then, calculate the sum of these ratings for each skill. Finally, select the skill with the highest rating." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 122, + 121, + 468, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 121, + 468, + 141 + ], + "spans": [ + { + "bbox": [ + 122, + 121, + 468, + 141 + ], + "type": "text", + "content": "Write your ratings in `step_by_step_reasoning`. Then, write your skill choice in `proposed_skill`." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 170, + 350, + 183 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 170, + 350, + 183 + ], + "spans": [ + { + "bbox": [ + 105, + 170, + 350, + 183 + ], + "type": "text", + "content": "A.2 Code Generation Agent Context Representation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 191, + 265, + 205 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 191, + 265, + 205 + ], + "spans": [ + { + "bbox": [ + 105, + 191, + 265, + 205 + ], + "type": "text", + "content": "A.2.1 States as Accessibility Trees" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 211, + 506, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 211, + 506, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 211, + 506, + 277 + ], + "type": "text", + "content": "We represent the state as a tuple containing an screenshot of the page, a title, a URL, and an accessibility tree hierarchy. These are serialized by concatenating clean string representations of each of these properties. 
We represent each node in the accessibility hierarchy as a line containing (1) indentation representing the depth in the tree, (2) the accessible role, (3) the accessible name of the element, in quotation marks, and finally (4) any other accessibility tree information (represented as a ia- attributes in the DOM.)" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 290, + 179, + 303 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 290, + 179, + 303 + ], + "spans": [ + { + "bbox": [ + 105, + 290, + 179, + 303 + ], + "type": "text", + "content": "A.2.2 Prompts" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 308, + 385, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 308, + 385, + 323 + ], + "spans": [ + { + "bbox": [ + 105, + 308, + 385, + 323 + ], + "type": "text", + "content": "Substrings in square brackets ([]) are omitted if not applicable." + } + ] + } + ], + "index": 6 + }, + { + "type": "code", + "bbox": [ + 106, + 345, + 504, + 729 + ], + "blocks": [ + { + "bbox": [ + 121, + 330, + 282, + 342 + ], + "lines": [ + { + "bbox": [ + 121, + 330, + 282, + 342 + ], + "spans": [ + { + "bbox": [ + 121, + 330, + 282, + 342 + ], + "type": "text", + "content": "Code Agent Context Representation" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 345, + 504, + 729 + ], + "lines": [ + { + "bbox": [ + 106, + 345, + 504, + 729 + ], + "spans": [ + { + "bbox": [ + 106, + 345, + 504, + 729 + ], + "type": "text", + "content": "URL: {relative_url} \n{ state_accessibility_tree_string } \n \n{step_by_step_reasoning} \n \n \n{action_source} \n \n[ \n{stdout} \n] \n[ \n{return_value} \n] \n[< warnings> \n{ warnings} \n] \n[ \n{ recovered_exception} \n, ...] 
\n[ \n{unrecoverable_exception} \n] \nURL: {relative_url}" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "code_body" + } + ], + "index": 8, + "sub_type": "code", + "guess_lang": "tcl" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 120, + 99, + 257, + 240 + ], + "blocks": [ + { + "bbox": [ + 120, + 99, + 257, + 240 + ], + "lines": [ + { + "bbox": [ + 120, + 99, + 257, + 240 + ], + "spans": [ + { + "bbox": [ + 120, + 99, + 257, + 240 + ], + "type": "text", + "content": "{state_accessibility_tree_string} \n \n... \n \nURL: {relative_url} \n{state_accessibility_tree_string} \n \n \n{result} \n" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 269, + 313, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 269, + 313, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 269, + 313, + 282 + ], + "type": "text", + "content": "A.3 Code Generation Agent Error Detection" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 289, + 504, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 289, + 504, + 314 + ], + "spans": [ + { + "bbox": [ + 104, + 289, + 504, + 314 + ], + "type": "text", + "content": "This is the prompt template used if we detect any errors in the agent's code. 
We use the following error messages:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 127, + 321, + 504, + 520 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 128, + 321, + 440, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 128, + 321, + 440, + 332 + ], + "spans": [ + { + "bbox": [ + 128, + 321, + 440, + 332 + ], + "type": "text", + "content": "1. A function named act not provided: Function name must be 'act'" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 127, + 335, + 504, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 335, + 504, + 358 + ], + "spans": [ + { + "bbox": [ + 127, + 335, + 504, + 358 + ], + "type": "text", + "content": "2. The function act does not contain a single argument, page: Function must take exactly one argument: 'page'." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 127, + 361, + 504, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 361, + 504, + 383 + ], + "spans": [ + { + "bbox": [ + 127, + 361, + 504, + 383 + ], + "type": "text", + "content": "3. The function function is disabled: Function 'fnname' is disabled. Please use another function." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 127, + 386, + 504, + 421 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 386, + 504, + 421 + ], + "spans": [ + { + "bbox": [ + 127, + 386, + 504, + 421 + ], + "type": "text", + "content": "4. Incorrect or buggy Playwright functions: Please use the 'page.get_by...().f()' functions instead of the 'page.f(selector)' functions. where " + }, + { + "bbox": [ + 127, + 386, + 504, + 421 + ], + "type": "inline_equation", + "content": "f \\in \\{click, fill, type\\}" + }, + { + "bbox": [ + 127, + 386, + 504, + 421 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 127, + 422, + 504, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 422, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 127, + 422, + 504, + 468 + ], + "type": "text", + "content": "5. Use of CSS selectors instead of accessibility tree selectors: Please use Accessibility Tree-centric selectors, like 'page.get_by_role()', '.nth()', instead of the CSS-style selectors like '.locator' or '.query_selector()'." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 127, + 470, + 504, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 470, + 504, + 502 + ], + "spans": [ + { + "bbox": [ + 127, + 470, + 504, + 502 + ], + "type": "text", + "content": "6. Blank response: You did not provide any Python code, but you also did not provide a result for 'terminate_with_result'. Please provide one or the other." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 127, + 506, + 293, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 506, + 293, + 520 + ], + "spans": [ + { + "bbox": [ + 127, + 506, + 293, + 520 + ], + "type": "text", + "content": "7. 
Type errors: Type Error: {error}" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 531, + 212, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 531, + 212, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 531, + 212, + 544 + ], + "type": "text", + "content": "A.4 Action Synthesis" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 121, + 556, + 261, + 569 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 556, + 261, + 569 + ], + "spans": [ + { + "bbox": [ + 121, + 556, + 261, + 569 + ], + "type": "text", + "content": "Code Agent Action Generation" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 121, + 578, + 419, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 578, + 419, + 590 + ], + "spans": [ + { + "bbox": [ + 121, + 578, + 419, + 590 + ], + "type": "text", + "content": "You generate Playwright code to interact with websites. Words of wisdom:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 147, + 593, + 490, + 720 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 147, + 593, + 490, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 593, + 490, + 635 + ], + "spans": [ + { + "bbox": [ + 147, + 593, + 490, + 635 + ], + "type": "text", + "content": "- If you want to click a generic button (e.g., that belongs to an element), use the full .get_by-role() path to the element (e.g., .get_by-role(\"group\", name=\"Test Item\").get_by-role(\"button\", name=\"Go\") instead of .get_by-role(\"button\", name=\"Go\"), as this is ambiguous)." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 147, + 637, + 317, + 648 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 637, + 317, + 648 + ], + "spans": [ + { + "bbox": [ + 147, + 637, + 317, + 648 + ], + "type": "text", + "content": "- Write exceptionally correct Python code." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 147, + 651, + 490, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 651, + 490, + 682 + ], + "spans": [ + { + "bbox": [ + 147, + 651, + 490, + 682 + ], + "type": "text", + "content": "- You love to take advantage of functions in the knowledge_base whenever possible. You use them via Python function calls. It is required to use the knowledge base function corresponding to an action if it exists." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 147, + 685, + 460, + 720 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 147, + 685, + 460, + 720 + ], + "spans": [ + { + "bbox": [ + 147, + 685, + 460, + 720 + ], + "type": "text", + "content": "- Use relative goto when you can. \nare currently on a webpage titled {title_repr}, with the URL {url_repr}. \nder the following Accessibility Tree. The accessibility tree is formatted like this:" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 119, + 86, + 489, + 721 + ], + "blocks": [ + { + "bbox": [ + 119, + 86, + 489, + 721 + ], + "lines": [ + { + "bbox": [ + 119, + 86, + 489, + 721 + ], + "spans": [ + { + "bbox": [ + 119, + 86, + 489, + 721 + ], + "type": "text", + "content": "[role] [\"name\"]\n[properties] {{{child1]}}; \n[role] [\"name\"]\n[properties]; // no children \nHere is an example: \n \narticle \"Willy Wonka's Chocolate Factory Opens Its Gates\" {{{link \"Share\";}}} \n}; \narticle \"Hogwarts Begins Accepting Applicants for Class of 2029\" {{{link \"Share\";}}} \n \nTo 
select the first link, you can do (because name = case-insensitive substring match unless exact=True is supplied): \nfirst_link = page.get_by_role(\"article\", name=\"Willy Wonka\").get_by_role(\"link\", name=\"Share\") \nHere is another example: \n \narticle {{{header \"How can I install CUDA on Ubuntu 22?\"; link \"Share\";}}} \n}; \narticle {{{header \"How do I install Docker?\"; link \"Share\";}}} \n \nTo select the first link, you can do: \npage.get_byRole(\"article\").filter (has=page.get_byRole(\"header\", \"How can I install CUDA\").get_byRole(\"link\", name=\"Share\") \nHere, the .filter() is necessary to ensure that we select the correct article. This is especially important in cases where the parent element doesn't include relevant criteria for identifying a child element. Tips about this tree: \n· If you see a node as a child of an iframe, you must use page.frame(name=...), and then access the node (via .get_by ROLE() or similar). \n· Some elements will require interaction via .select_option(). They will be labeled as such. 
This is because they are HTML aka get_by_placeholder(\"Search GitLab\")" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 138, + 605, + 486, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 605, + 486, + 647 + ], + "spans": [ + { + "bbox": [ + 138, + 605, + 486, + 647 + ], + "type": "text", + "content": "2) > aka locator([[id=\\\"__BVID__158\\\"]])" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 654, + 159, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 654, + 159, + 665 + ], + "spans": [ + { + "bbox": [ + 121, + 654, + 159, + 665 + ], + "type": "text", + "content": "Call log:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 665, + 181, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 665, + 181, + 675 + ], + "spans": [ + { + "bbox": [ + 121, + 665, + 181, + 675 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 122, + 705, + 163, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 705, + 163, + 714 + ], + "spans": [ + { + "bbox": [ + 122, + 705, + 163, + 714 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "32" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 31 + }, + { + "para_blocks": [ + { + "bbox": [ + 120, + 88, + 384, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 88, + 384, + 102 + ], + "spans": [ + { + "bbox": [ + 120, + 88, + 384, + 102 + ], + "type": "text", + "content": "URL: /byte blaze/accessible- html-content-patterns/-/branches" + } + ] + } + ], + "index": 0 + 
}, + { + "bbox": [ + 121, + 110, + 165, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 110, + 165, + 129 + ], + "spans": [ + { + "bbox": [ + 121, + 110, + 165, + 129 + ], + "type": "text", + "content": "[Omitted] " + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 140, + 176, + 150 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 140, + 176, + 150 + ], + "spans": [ + { + "bbox": [ + 121, + 140, + 176, + 150 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 150, + 480, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 150, + 480, + 200 + ], + "spans": [ + { + "bbox": [ + 121, + 150, + 480, + 200 + ], + "type": "text", + "content": "The page currently displays a dialog that asks for confirmation to delete all merged branches. It requires typing 'delete' into an input box. After typing the confirmation text, the \"Delete merged branches\" button can be clicked to proceed. I will fill the textbox with 'delete' and then click the confirmation button to complete the task." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 122, + 201, + 181, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 201, + 181, + 210 + ], + "spans": [ + { + "bbox": [ + 122, + 201, + 181, + 210 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 122, + 220, + 156, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 220, + 156, + 229 + ], + "spans": [ + { + "bbox": [ + 122, + 220, + 156, + 229 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 5 + }, + { + "type": "code", + "bbox": [ + 130, + 245, + 485, + 346 + ], + "blocks": [ + { + "bbox": [ + 130, + 245, + 485, + 346 + ], + "lines": [ + { + "bbox": [ + 130, + 245, + 485, + 346 + ], + "spans": [ + { + "bbox": [ + 130, + 245, + 485, + 346 + ], + "type": "text", + "content": "async def act(page): # Fill the confirmation textbox with 'delete' await page.get_by_role(\"dialog\", name=\"Delete all merged branches?\").get_by-role(\"textbox\").fill(\"delete\") # Click the \"Delete merged branches\" button to confirm deletion await page.get_by-role(\"dialog\", name=\"Delete all merged branches?\").get_by-role(\"button\", name=\"Delete merged branches\").click()" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "python" + }, + { + "type": "code", + "bbox": [ + 121, + 364, + 162, + 391 + ], + "blocks": [ + { + "bbox": [ + 121, + 364, + 162, + 391 + ], + "lines": [ + { + "bbox": [ + 121, + 364, + 162, + 391 + ], + "spans": [ + { + "bbox": [ + 121, + 364, + 162, + 391 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 121, + 405, + 162, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 405, + 162, + 413 + ], + "spans": [ + { + "bbox": [ + 121, + 405, + 
162, + 413 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 122, + 414, + 383, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 414, + 383, + 425 + ], + "spans": [ + { + "bbox": [ + 122, + 414, + 383, + 425 + ], + "type": "text", + "content": "URL: /byte blaze/accessible- html-content-patterns/-/branches" + } + ] + } + ], + "index": 9 + }, + { + "type": "code", + "bbox": [ + 122, + 434, + 165, + 453 + ], + "blocks": [ + { + "bbox": [ + 122, + 434, + 165, + 453 + ], + "lines": [ + { + "bbox": [ + 122, + 434, + 165, + 453 + ], + "spans": [ + { + "bbox": [ + 122, + 434, + 165, + 453 + ], + "type": "text", + "content": "[Omitted] " + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "code_body" + } + ], + "index": 10, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 121, + 464, + 222, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 464, + 222, + 473 + ], + "spans": [ + { + "bbox": [ + 121, + 464, + 222, + 473 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 122, + 474, + 473, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 474, + 473, + 495 + ], + "spans": [ + { + "bbox": [ + 122, + 474, + 473, + 495 + ], + "type": "text", + "content": "The merged branches have been successfully deleted in the 'byteblaze/accessible-xml-content-patterns' project." 
+ } + ] + } + ], + "index": 12 + }, + { + "type": "code", + "bbox": [ + 122, + 495, + 228, + 520 + ], + "blocks": [ + { + "bbox": [ + 122, + 495, + 228, + 520 + ], + "lines": [ + { + "bbox": [ + 122, + 495, + 228, + 520 + ], + "spans": [ + { + "bbox": [ + 122, + 495, + 228, + 520 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "code_body" + } + ], + "index": 13, + "sub_type": "code", + "guess_lang": "erb" + }, + { + "bbox": [ + 122, + 534, + 162, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 534, + 162, + 543 + ], + "spans": [ + { + "bbox": [ + 122, + 534, + 162, + 543 + ], + "type": "text", + "content": "" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 122, + 544, + 383, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 544, + 383, + 555 + ], + "spans": [ + { + "bbox": [ + 122, + 544, + 383, + 555 + ], + "type": "text", + "content": "URL: /byte blaze/accessible- html-content-patterns/~/branches" + } + ] + } + ], + "index": 15 + }, + { + "type": "code", + "bbox": [ + 122, + 563, + 165, + 583 + ], + "blocks": [ + { + "bbox": [ + 122, + 563, + 165, + 583 + ], + "lines": [ + { + "bbox": [ + 122, + 563, + 165, + 583 + ], + "spans": [ + { + "bbox": [ + 122, + 563, + 165, + 583 + ], + "type": "text", + "content": "[Omitted] " + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "code_body" + } + ], + "index": 16, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 104, + 697, + 504, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 697, + 504, + 732 + ], + "spans": [ + { + "bbox": [ + 104, + 697, + 504, + 732 + ], + "type": "text", + "content": "Here is the fix the agent made: removing the ambiguous DOM selector for \" textbox\", and replacing it with one that first selects the dialog window that contains the textbox, and then the textbox element within it." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "33" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 32 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 83, + 231, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 83, + 231, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 231, + 95 + ], + "type": "text", + "content": "delete_merged_branches" + } + ] + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 119, + 112, + 489, + 487 + ], + "blocks": [ + { + "bbox": [ + 119, + 112, + 489, + 487 + ], + "lines": [ + { + "bbox": [ + 119, + 112, + 489, + 487 + ], + "spans": [ + { + "bbox": [ + 119, + 112, + 489, + 487 + ], + "type": "text", + "content": "async def deletemergedbranches(page, project_path):\n return (0, 1)\n [0, 1] = 0\n [0, 1] = 1\n [0, 1] = 2\n [0, 1] = 3\n [0, 1] = 4\n [0, 1] = 5\n [0, 1] = 6\n [0, 1] = 7\n [0, 1] = 8\n [0, 1] = 9\n [0, 1] = 10\n [0, 1] = 11\n [0, 1] = 12\n [0, 1] = 13\n [0, 1] = 14\n [0, 1] = 15\n [0, 1] = 16\n [0, 1] = 17\n [0, 1] = 18\n [0, 1] = 19\n [0, 1] = 20\n [0, 1] = 21\n [0, 1] = 22\n [0, 1] = 23\n [0, 1] = 24\n [0, 1] = 25\n [0, 1] = 26\n [0, 1] = 27\n [0, 1] = 28\n [0, 1] = 29\n [0, 1] = 30\n [0, 1] = 31\n [0, 1] = 32\n [0, 1] = 33\n [0, 1] = 34\n [0, 1] = 35\n [0, 1] = 36\n [0, 1] = 37\n [0, 1] = 38\n [0, 1] = 39\n [0, 1] = 40\n [0, 1] = 41\n [0, 1] = 42\n [0, 1] = 43\n [0, 1] = 44\n [0, 1] = 45\n [0, 1] = 46\n [0, 1] = 47\n [0, 1] = 48\n [0, 1] = 49\n [0, 1] = 50\n [0, 1] = 51\n [0, 1] = 52\n [0, 1] = 53\n [0, 1] = 54\n [0, 1] = 55\n [0, 1] = 56\n [0, 1] = 57\n [0, 1] = 58\n [0, 1] = 59\n [0, 1] = 60\n [0, 1] = 61\n [0, 1] = 62\n [0, 1] = 63\n [0, 1] = 64\n [0, 1] = 65\n [0, 1] = 66\n 
[0, 1] = 67\n [0, 1] = 68\n [0, 1] = 69\n [0, 1] = 70\n [0, 1] = 71\n [0, 1] = 72\n [0, 1] =" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 105, + 530, + 269, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 530, + 269, + 542 + ], + "spans": [ + { + "bbox": [ + 105, + 530, + 269, + 542 + ], + "type": "text", + "content": "D.2 Failure Cases and Limitations" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 555, + 506, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 506, + 578 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 506, + 578 + ], + "type": "text", + "content": "We observe some failure cases, which seem to be caused by gaps in LLM reasoning capabilities for certain situations." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 602, + 270, + 616 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 602, + 270, + 616 + ], + "spans": [ + { + "bbox": [ + 105, + 602, + 270, + 616 + ], + "type": "text", + "content": "D.2.1 Criteria for Verifying Success" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 626, + 504, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 626, + 504, + 673 + ], + "spans": [ + { + "bbox": [ + 104, + 626, + 504, + 673 + ], + "type": "text", + "content": "Because our criteria for a function to be \"verified\" was to have it be called without producing an exception, we found that occasionally, malfunctioning APIs could be marked as verified simply because they silenced all exceptions that could have occurred. This represents a measure for evaluation having unintended consequences." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "spans": [ + { + "bbox": [ + 104, + 676, + 505, + 733 + ], + "type": "text", + "content": "In the following example, a function was misused, and it was unable to resolve the necessary DOM elements given the page state. However, instead of improving the function's signature or adding a check to ensure the function was called correctly, the LLM adds \"if\" statements to simply avoid any of the atomic actions from producing an error. While this does reduce the number of exceptions, it does not improve the robustness of the API." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "34" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 33 + }, + { + "para_blocks": [ + { + "bbox": [ + 121, + 83, + 236, + 95 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 83, + 236, + 95 + ], + "spans": [ + { + "bbox": [ + 121, + 83, + 236, + 95 + ], + "type": "text", + "content": "update_productquantity" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 130, + 114, + 436, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 114, + 436, + 125 + ], + "spans": [ + { + "bbox": [ + 130, + 114, + 436, + 125 + ], + "type": "text", + "content": "async def update_product_quantity(page, product_name, new_quantity):" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 149, + 125, + 163, + 129 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 125, + 163, + 129 + ], + "spans": [ + { + "bbox": [ + 149, + 125, + 163, + 129 + ], + "type": "text", + "content": "1 
1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 148, + 135, + 469, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 135, + 469, + 155 + ], + "spans": [ + { + "bbox": [ + 148, + 135, + 469, + 155 + ], + "type": "text", + "content": "Update the quantity of a specified product in the Magento Admin Panel's Advanced Inventory settings." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 148, + 164, + 465, + 184 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 164, + 465, + 184 + ], + "spans": [ + { + "bbox": [ + 148, + 164, + 465, + 184 + ], + "type": "text", + "content": "This function navigates to the product's edit page, opens the Advanced Inventory dialog," + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 148, + 185, + 465, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 185, + 465, + 205 + ], + "spans": [ + { + "bbox": [ + 148, + 185, + 465, + 205 + ], + "type": "text", + "content": "and updates the quantity to the specified value. It includes proactive checks for element availability." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 149, + 214, + 225, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 214, + 225, + 224 + ], + "spans": [ + { + "bbox": [ + 149, + 214, + 225, + 224 + ], + "type": "text", + "content": "Initial UI State:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 149, + 225, + 395, + 234 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 225, + 395, + 234 + ], + "spans": [ + { + "bbox": [ + 149, + 225, + 395, + 234 + ], + "type": "text", + "content": "- The user must be logged into the Magento Admin Panel." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 149, + 235, + 487, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 235, + 487, + 255 + ], + "spans": [ + { + "bbox": [ + 149, + 235, + 487, + 255 + ], + "type": "text", + "content": "- The function assumes the product edit page is accessible via the '/admin/catalog/product/edit/name/{product_name}' URL." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 149, + 266, + 171, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 266, + 171, + 274 + ], + "spans": [ + { + "bbox": [ + 149, + 266, + 171, + 274 + ], + "type": "text", + "content": "Args:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 148, + 275, + 463, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 275, + 463, + 285 + ], + "spans": [ + { + "bbox": [ + 148, + 275, + 463, + 285 + ], + "type": "text", + "content": "page (Page): The playwright page object representing the browser page." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 149, + 285, + 391, + 294 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 285, + 391, + 294 + ], + "spans": [ + { + "bbox": [ + 149, + 285, + 391, + 294 + ], + "type": "text", + "content": "product_name(str): The name of the product to update." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 149, + 295, + 418, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 295, + 418, + 304 + ], + "spans": [ + { + "bbox": [ + 149, + 295, + 418, + 304 + ], + "type": "text", + "content": "newquantity(int):The new quantity to set for the product." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 149, + 315, + 194, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 315, + 194, + 324 + ], + "spans": [ + { + "bbox": [ + 149, + 315, + 194, + 324 + ], + "type": "text", + "content": "Usage Log:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 149, + 325, + 445, + 334 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 325, + 445, + 334 + ], + "spans": [ + { + "bbox": [ + 149, + 325, + 445, + 334 + ], + "type": "text", + "content": "- Successfully updated the quantity for 'Arcadio Gym Short' to 50." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 149, + 335, + 422, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 335, + 422, + 343 + ], + "spans": [ + { + "bbox": [ + 149, + 335, + 422, + 343 + ], + "type": "text", + "content": "- Adjusted the quantity for 'Yoga Mat' to 100 without issues." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 149, + 345, + 463, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 345, + 463, + 354 + ], + "spans": [ + { + "bbox": [ + 149, + 345, + 463, + 354 + ], + "type": "text", + "content": "- Encountered a timeout error when trying to update 'Sprite Yoga Mat'." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 149, + 354, + 465, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 354, + 465, + 364 + ], + "spans": [ + { + "bbox": [ + 149, + 354, + 465, + 364 + ], + "type": "text", + "content": "Consider checking the availability of the 'Advanced Inventory' button." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 149, + 364, + 163, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 364, + 163, + 369 + ], + "spans": [ + { + "bbox": [ + 149, + 364, + 163, + 369 + ], + "type": "text", + "content": "1 1" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 149, + 375, + 190, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 375, + 190, + 384 + ], + "spans": [ + { + "bbox": [ + 149, + 375, + 190, + 384 + ], + "type": "text", + "content": "import re" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 149, + 385, + 213, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 385, + 213, + 394 + ], + "spans": [ + { + "bbox": [ + 149, + 385, + 213, + 394 + ], + "type": "text", + "content": "import asyncio" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 149, + 403, + 454, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 403, + 454, + 414 + ], + "spans": [ + { + "bbox": [ + 149, + 403, + 454, + 414 + ], + "type": "text", + "content": "await page.goto(f\"/admin/catalog/product/edit/name/{product_name}/\"" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 149, + 415, + 460, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 415, + 460, + 433 + ], + "spans": [ + { + "bbox": [ + 149, + 415, + 460, + 433 + ], + "type": "text", + "content": "advanced_inventory_button = page.get_by-role(\"button\", name=\"Advanced Inventory\")" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 149, + 434, + 360, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 434, + 360, + 443 + ], + "spans": [ + { + "bbox": [ + 149, + 434, + 360, + 443 + ], + "type": "text", + "content": "if await advanced_inventory_button.count() > 0:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 149, + 445, + 342, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 149, + 445, + 342, + 454 + ], + "spans": [ + { + "bbox": [ + 149, + 445, + 342, + 454 + ], + "type": "text", + "content": "await advanced_inventory_button.click()" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 149, + 455, + 171, + 462 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 455, + 171, + 462 + ], + "spans": [ + { + "bbox": [ + 149, + 455, + 171, + 462 + ], + "type": "text", + "content": "else:" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 149, + 464, + 369, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 464, + 369, + 473 + ], + "spans": [ + { + "bbox": [ + 149, + 464, + 369, + 473 + ], + "type": "text", + "content": "print(\"Advanced Inventory button not found.\")" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 167, + 475, + 195, + 483 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 475, + 195, + 483 + ], + "spans": [ + { + "bbox": [ + 167, + 475, + 195, + 483 + ], + "type": "text", + "content": "return" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 149, + 484, + 419, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 484, + 419, + 494 + ], + "spans": [ + { + "bbox": [ + 149, + 484, + 419, + 494 + ], + "type": "text", + "content": "qty_input = page.get_by-role(\"textbox\", name=\"[GLOBAL] Qty\")" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 149, + 495, + 288, + 503 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 495, + 288, + 503 + ], + "spans": [ + { + "bbox": [ + 149, + 495, + 288, + 503 + ], + "type": "text", + "content": "if await qty_input.count() > 0:" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 167, + 505, + 342, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 505, + 342, + 514 + ], + "spans": [ + { + "bbox": [ + 167, + 505, + 342, + 514 + ], + "type": "text", + "content": "await qty_input 
fills(str(new_quantity))" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 149, + 514, + 171, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 514, + 171, + 522 + ], + "spans": [ + { + "bbox": [ + 149, + 514, + 171, + 522 + ], + "type": "text", + "content": "else:" + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 149, + 524, + 388, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 524, + 388, + 533 + ], + "spans": [ + { + "bbox": [ + 149, + 524, + 388, + 533 + ], + "type": "text", + "content": "print(\"Quantity input not found.\")" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 167, + 535, + 195, + 542 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 535, + 195, + 542 + ], + "spans": [ + { + "bbox": [ + 167, + 535, + 195, + 542 + ], + "type": "text", + "content": "return" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 149, + 544, + 388, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 544, + 388, + 553 + ], + "spans": [ + { + "bbox": [ + 149, + 544, + 388, + 553 + ], + "type": "text", + "content": "done_button = page.get_by-role(\"button\", name=\"Done\")" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 149, + 554, + 297, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 554, + 297, + 563 + ], + "spans": [ + { + "bbox": [ + 149, + 554, + 297, + 563 + ], + "type": "text", + "content": "if await done_button.count() > 0:" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 167, + 564, + 279, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 564, + 279, + 573 + ], + "spans": [ + { + "bbox": [ + 167, + 564, + 279, + 573 + ], + "type": "text", + "content": "await done_button.click()" + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 149, + 574, + 171, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 574, + 171, + 582 + ], + 
"spans": [ + { + "bbox": [ + 149, + 574, + 171, + 582 + ], + "type": "text", + "content": "else:" + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 149, + 583, + 307, + 593 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 583, + 307, + 593 + ], + "spans": [ + { + "bbox": [ + 149, + 583, + 307, + 593 + ], + "type": "text", + "content": "print(\"Done button not found.\")" + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 167, + 594, + 195, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 594, + 195, + 601 + ], + "spans": [ + { + "bbox": [ + 167, + 594, + 195, + 601 + ], + "type": "text", + "content": "return" + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 149, + 604, + 388, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 604, + 388, + 613 + ], + "spans": [ + { + "bbox": [ + 149, + 604, + 388, + 613 + ], + "type": "text", + "content": "save_button = page.get_by-role(\"button\", name=\"Save\")" + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 149, + 613, + 297, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 613, + 297, + 622 + ], + "spans": [ + { + "bbox": [ + 149, + 613, + 297, + 622 + ], + "type": "text", + "content": "if await save_button.count() > 0:" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 167, + 624, + 279, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 624, + 279, + 633 + ], + "spans": [ + { + "bbox": [ + 167, + 624, + 279, + 633 + ], + "type": "text", + "content": "await save_button.click()" + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 149, + 634, + 187, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 634, + 187, + 642 + ], + "spans": [ + { + "bbox": [ + 149, + 634, + 187, + 642 + ], + "type": "text", + "content": "else:" + } + ] + } + ], + "index": 43 + }, + { + "bbox": [ + 167, + 643, + 307, + 652 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 167, + 643, + 307, + 652 + ], + "spans": [ + { + "bbox": [ + 167, + 643, + 307, + 652 + ], + "type": "text", + "content": "print(\"Save button not found.\")" + } + ] + } + ], + "index": 44 + }, + { + "bbox": [ + 167, + 654, + 195, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 654, + 195, + 662 + ], + "spans": [ + { + "bbox": [ + 167, + 654, + 195, + 662 + ], + "type": "text", + "content": "return" + } + ] + } + ], + "index": 45 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "35" + } + ] + } + ], + "index": 46 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 34 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 80, + 492, + 293 + ], + "blocks": [ + { + "bbox": [ + 116, + 80, + 492, + 293 + ], + "lines": [ + { + "bbox": [ + 116, + 80, + 492, + 293 + ], + "spans": [ + { + "bbox": [ + 116, + 80, + 492, + 293 + ], + "type": "image", + "image_path": "323149586afd80fbae2733b648b2e5eb7a1c197d854cc5e6542a41dec3475c4b.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 302, + 506, + 336 + ], + "lines": [ + { + "bbox": [ + 104, + 302, + 506, + 336 + ], + "spans": [ + { + "bbox": [ + 104, + 302, + 506, + 336 + ], + "type": "text", + "content": "Figure 6: The agent is unable to successfully verify that the \"add collaborator to project\" works because it does not have access to a list of emails that can appropriately be used as test cases." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 358, + 279, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 358, + 279, + 371 + ], + "spans": [ + { + "bbox": [ + 105, + 358, + 279, + 371 + ], + "type": "text", + "content": "D.2.2 Creating Functional Test Cases" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 380, + 506, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 380, + 506, + 447 + ], + "spans": [ + { + "bbox": [ + 104, + 380, + 506, + 447 + ], + "type": "text", + "content": "Some skills require interacting with data stored on the website, and their success is conditioned not on whether the UI is accurately interacted with but rather whether the function inputs are realistic. In the following example, the agent attempts to learn how to add collaborators to a Gitlab repository, but is unable to because it does not have a source of real test input data. Future work could explore automatically generating test cases for agents for successful evaluation in dynamic websites." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 462, + 270, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 462, + 270, + 475 + ], + "spans": [ + { + "bbox": [ + 105, + 462, + 270, + 475 + ], + "type": "text", + "content": "D.2.3 Ceiling of Agent Capabilities" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 106, + 485, + 504, + 731 + ], + "blocks": [ + { + "bbox": [ + 106, + 485, + 504, + 731 + ], + "lines": [ + { + "bbox": [ + 106, + 485, + 504, + 731 + ], + "spans": [ + { + "bbox": [ + 106, + 485, + 504, + 731 + ], + "type": "image", + "image_path": "4d17a0e8318ad94249d4c8c424867fc0ef14b19c6bc9d7de9e422335dbf683c9.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "36" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 35 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 80, + 504, + 511 + ], + "blocks": [ + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "lines": [ + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "spans": [ + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "text", + "content": "- Successfully navigated to the 'Cars for Sale' section, applied filters, and requested quotes for multiple listings. - Encountered issues with automatic navigation using 'Next car' link, resolved by manual URL navigation. 
import asyncio await page.goto(\"https://www.cars.com/shopping/\") await page.wait_for_load_state(\"networkidle\") await close_overlays(page) await page.get_by_role(\"combobox\", name=\"Make\").select_option(make) await page.get_by-role(\"combobox\", name=\"Model\").select_option(model) show_MATCHes_button = page.get_byRole(\"button\", name=\"Show matches\") if await show_MATCHes_button.count() > 0: await show_MATCHes_button.click() else: print(\"Show matches button not found.\") return await page.wait_for_load_state(\"networkidle\") for _ in range(5): await page.get_byrole(\"textbox\",name " + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "text", + "content": " \"First name\").fill(first_name) await page.get_byrole(\"textbox\",name " + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "text", + "content": " \"Last name\").fill(last_name) await page.get_byrole(\"textbox\",name " + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "text", + "content": " \"Email\").fill(email) await page.get_byrole(\"combobox\",name " + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "text", + "content": " \"Subject\").select_option( \"Get a price quote\" ) submit_button = page.get_byrole(\"button\", name=\"Email\") if await submit_button.count() > 0: await submit_button.click() else: print(\"Submit button not found.\") return await asyncio.sleep(2) next_car_link = page.get_byrole(\"link\", name " + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 106, + 80, + 504, + 511 + ], + "type": 
"text", + "content": " \"Next car\") if await next_car_link.count() > 0: await next_car_link.click() await page.wait_for_load_state(\"networkidle\") else: print(\"Next car link not found. Navigation stopped.\") break" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 105, + 522, + 211, + 534 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 522, + 211, + 534 + ], + "spans": [ + { + "bbox": [ + 105, + 522, + 211, + 534 + ], + "type": "text", + "content": "D.2.4 Fail to call API" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 542, + 506, + 612 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 542, + 506, + 612 + ], + "spans": [ + { + "bbox": [ + 104, + 542, + 506, + 612 + ], + "type": "text", + "content": "The agent does not call available APIs even when they are generated during exploration. As shown in Figure 7, for the task \"Save a hamburger recipe,\" the agent should first call search Recipes_by_cuisine_type('hamburger') to obtain recipe details and then select the print option to save the recipes. However, the agent fails to call this API during the task execution, indicating a gap between exploration and execution phases, suggesting the need for improving the agent's policy to better utilize generated APIs." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 617, + 269, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 617, + 269, + 629 + ], + "spans": [ + { + "bbox": [ + 121, + 617, + 269, + 629 + ], + "type": "text", + "content": "search_recipes_by_cuisine_type" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 129, + 647, + 484, + 720 + ], + "blocks": [ + { + "bbox": [ + 129, + 647, + 484, + 720 + ], + "lines": [ + { + "bbox": [ + 129, + 647, + 484, + 720 + ], + "spans": [ + { + "bbox": [ + 129, + 647, + 484, + 720 + ], + "type": "text", + "content": "async def searchrecipes_by_cuisine_type(page, cuisine_type):\n \"\" Searches for recipes based on a specified cuisine type on Cookpad. This function automates the process of searching for recipes by entering a cuisine type into the search functionality on the Cookpad homepage." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 761 + ], + "type": "text", + "content": "37" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 36 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 166, + 80, + 443, + 236 + ], + "blocks": [ + { + "bbox": [ + 166, + 80, + 443, + 236 + ], + "lines": [ + { + "bbox": [ + 166, + 80, + 443, + 236 + ], + "spans": [ + { + "bbox": [ + 166, + 80, + 443, + 236 + ], + "type": "image", + "image_path": "4e011efbefed4e7c4d0b6eee527d126e6082b24752a6e871c38288561817565e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "lines": [ + { + "bbox": [ + 104, + 249, + 504, + 273 + ], + "spans": [ + { + "bbox": [ + 
104, + 249, + 504, + 273 + ], + "type": "text", + "content": "Figure 7: Screenshot of cookpad.com. The agent fails to call search Recipes_by_cuisine_type('hamburger') to obtain recipe details." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "code", + "bbox": [ + 106, + 291, + 504, + 553 + ], + "blocks": [ + { + "bbox": [ + 106, + 291, + 504, + 553 + ], + "lines": [ + { + "bbox": [ + 106, + 291, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 106, + 291, + 504, + 553 + ], + "type": "text", + "content": "Args: page: The Playwright page object to interact with. cuisine_type (str): The cuisine type to search for, e.g., 'Italian', 'Mexican', 'Korean', 'Chinese', 'American'. Usage log: - Successfully used to search for 'Italian' cuisine recipes, displaying the correct results. import re await page.get_by_role(\"textbox\", name=\"Search by recipe or ingredients\"). fill( cuisine_type ) search_button = page.get_byrole(\"button\", name " + }, + { + "bbox": [ + 106, + 291, + 504, + 553 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 106, + 291, + 504, + 553 + ], + "type": "text", + "content": " re.compile(\"Search\",re. 
IgnORECASE)) if await search_button.is Visible(): await search_button.click() else: await pagekeyboard.press(\"Enter\")" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 105, + 563, + 220, + 576 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 563, + 220, + 576 + ], + "spans": [ + { + "bbox": [ + 105, + 563, + 220, + 576 + ], + "type": "text", + "content": "D.2.5 Wrong Parameter" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 582, + 506, + 660 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 506, + 660 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 506, + 660 + ], + "type": "text", + "content": "In some cases, the agent correctly identifies the appropriate API but selects incorrect parameters. For example in Figure 8, in the task \"Browse recipes for gluten-free chocolate chip cookies that can be made without nuts,\" the agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts'), whereas the correct parameter should be search Recipes_by_Ingredients(page, 'chocolate chip without nuts'). This indicates that the agent needs better parameter selection logic to enhance performance." 
+ } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 106, + 666, + 504, + 730 + ], + "blocks": [ + { + "bbox": [ + 106, + 666, + 504, + 730 + ], + "lines": [ + { + "bbox": [ + 106, + 666, + 504, + 730 + ], + "spans": [ + { + "bbox": [ + 106, + 666, + 504, + 730 + ], + "type": "text", + "content": "search_recipes_by Ingredients\nasync def search_recipes_by Ingredients(page, ingredients):\n \"\"" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "38" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 37 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 80, + 492, + 293 + ], + "blocks": [ + { + "bbox": [ + 116, + 80, + 492, + 293 + ], + "lines": [ + { + "bbox": [ + 116, + 80, + 492, + 293 + ], + "spans": [ + { + "bbox": [ + 116, + 80, + 492, + 293 + ], + "type": "image", + "image_path": "539761f5c5718c889d442e4ae9ddab9d0c240e025524852374883354e97c7718.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 304, + 504, + 339 + ], + "lines": [ + { + "bbox": [ + 104, + 304, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 104, + 304, + 504, + 339 + ], + "type": "text", + "content": "Figure 8: Screenshot of cookpad.com. 
The agent incorrectly uses the parameter search Recipes_by Ingredients(page, 'chocolate chip, -nuts') instead of the correct parameter search Recipes_by Ingredients(page, 'chocolate chip without nuts')" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 106, + 401, + 504, + 718 + ], + "blocks": [ + { + "bbox": [ + 106, + 401, + 504, + 718 + ], + "lines": [ + { + "bbox": [ + 106, + 401, + 504, + 718 + ], + "spans": [ + { + "bbox": [ + 106, + 401, + 504, + 718 + ], + "type": "image", + "image_path": "d5dcc4475ce353b83253ee873efa9f0b7597e17fb11fdb9b5d22ade9898c4263.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 311, + 760 + ], + "type": "text", + "content": "39" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 38 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 111, + 86, + 492, + 304 + ], + "blocks": [ + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "lines": [ + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "spans": [ + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": "async def apply_multiple_filters(page, make, model, distance, zip_code, max_price): # Apply multiple filters on the Cars.com website including make, model, distance, ZIP code, and price range. 
await page.goto(\"https://www.cars.com/shopping/\") await close_overlays(page) await filterCars_by.make_and_model(page, make, model) await refine_search_by_distance_andzip(page, distance " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " distance,zip_code " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " zip_code) if max_price and max_price.strip(): price_select " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " page.locator(#make-model-max-price') if await price_select.count() " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": ">0" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " : await price_select.select_option(value " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " str(max_price)) else: print(\"Price selector not found\") search_button " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " page.get_byRole(\"tabpanel\",name " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " \"Make\").locator(\"spark-button[datasearchtype " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 
+ ], + "type": "text", + "content": " 'make']\") if await search_button.count() " + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "inline_equation", + "content": ">0" + }, + { + "bbox": [ + 111, + 86, + 492, + 304 + ], + "type": "text", + "content": " : await search_button.click() else: print(\"Search button not found in make panel\")" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 138, + 319, + 468, + 333 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 319, + 468, + 333 + ], + "spans": [ + { + "bbox": [ + 138, + 319, + 468, + 333 + ], + "type": "text", + "content": "Figure 9: An example of compositional API that calls multiple other APIs." + } + ] + } + ], + "index": 1, + "type": "text" + }, + { + "bbox": [ + 105, + 351, + 223, + 364 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 351, + 223, + 364 + ], + "spans": [ + { + "bbox": [ + 105, + 351, + 223, + 364 + ], + "type": "text", + "content": "D.3 Compositional API" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 372, + 238, + 385 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 372, + 238, + 385 + ], + "spans": [ + { + "bbox": [ + 105, + 372, + 238, + 385 + ], + "type": "text", + "content": "D.4 Website Roadmap API" + } + ] + } + ], + "index": 3 + }, + { + "type": "code", + "bbox": [ + 111, + 404, + 473, + 552 + ], + "blocks": [ + { + "bbox": [ + 111, + 404, + 473, + 552 + ], + "lines": [ + { + "bbox": [ + 111, + 404, + 473, + 552 + ], + "spans": [ + { + "bbox": [ + 111, + 404, + 473, + 552 + ], + "type": "text", + "content": "async def navigate_toCars_for_sale(page): # Navigate to the 'Cars for Sale' section on the Cars.com website, which contains car sales information. 
import asyncio await page.goto(\"https://www.cars.com/\" cars_for_sale_link = page.get_by-role(\"navigation\", name=\"main menu\").get_by-role( \"link\", name=\"Cars for Sale\") if await cars_for_sale_link.count() > 0: await cars_for_sale_link.click() await asyncio.sleep(5) else: print(\"Cars for Sale link is not visible.\")" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 104, + 568, + 506, + 604 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 568, + 506, + 604 + ], + "spans": [ + { + "bbox": [ + 104, + 568, + 506, + 604 + ], + "type": "text", + "content": "Figure 10: An example of roadmap information contained in the synthesized APIs. The docstring of this API contains the corresponding functionality of this API. If the agent is new to this website, this function might require some exploration to get." + } + ] + } + ], + "index": 5, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "spans": [ + { + "bbox": [ + 299, + 750, + 312, + 760 + ], + "type": "text", + "content": "40" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 39 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_content_list.json b/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..43eb5b10459f3d3c4852db39af33018f2525062e --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_content_list.json @@ -0,0 +1,1357 @@ +[ + { + "type": "text", + "text": "SafeMLRM: Demystifying Safety in Multi-modal Large Reasoning Models", + "text_level": 1, + 
"bbox": [ + 200, + 121, + 795, + 172 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Junfeng Fang $^{1}$ , Yukai Wang $^{1}$ , Ruipeng Wang $^{2}$ , Zijun Yao $^{1}$ , Kun Wang $^{3}$ , An Zhang $^{2}$ , Xiang Wang $^{2*}$ , Tat-Seng Chua $^{1}$", + "bbox": [ + 289, + 224, + 705, + 255 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ National University of Singapore, $^{2}$ University of Science and Technology of China \n $^{3}$ Nanyang Technological University \nfangjf1997@gmail.com", + "bbox": [ + 223, + 255, + 774, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 333, + 537, + 349 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The rapid advancement of multi-modal large reasoning models (MLRMs) — enhanced versions of multimodal language models (MLLMs) equipped with reasoning capabilities — has revolutionized diverse applications. However, their safety implications remain underexplored. While prior work has exposed critical vulnerabilities in unimodal reasoning models, MLRMs introduce distinct risks from cross-modal reasoning pathways. This work presents the first systematic safety analysis of MLRMs through large-scale empirical studies comparing MLRMs with their base MLLMs. Our experiments reveal three critical findings: (1) The Reasoning Tax: Acquiring reasoning capabilities catastrophically degrades inherited safety alignment. MLRMs exhibit $37.44\\%$ higher jailbreaking success rates than base MLLMs under adversarial attacks. (2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer $25\\times$ higher attack rates — far exceeding the average $3.4\\times$ increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. 
(3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction — $16.9\\%$ of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. These findings underscore the urgency of scenario-aware safety auditing and mechanisms to amplify MLRMs' self-correction potential. To catalyze research, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for mainstream models, datasets, and jailbreaking methods. Our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards.", + "bbox": [ + 228, + 364, + 767, + 681 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/5baac71a839d0492466b5b3bea9347d1027212b1c74ece6233e117dac9f1c557.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 267, + 691, + 289, + 707 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "OpenSafeMLRM", + "bbox": [ + 290, + 693, + 392, + 707 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "https://github.com/fangjf1/OpenSafeMLRM", + "bbox": [ + 406, + 693, + 728, + 707 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 733, + 313, + 750 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "With the rapid advancement of AI, large reasoning models (LRMs) like the DeepSeek series have gained significant attention [1, 2, 3, 4]. These models excel in performing complex tasks through meticulous reasoning, enabling transformative impacts across various downstream domains [5, 6]. Recently, this influence has expanded to multi-modal applications [7, 8, 9]. 
By embedding chain-of-thought data during the fine-tuning or reinforcement learning phrase of base multi-modal large language models (MLLMs) [10, 11, 12], researchers have developed Multi-modal Large Reasoning Models (MLRMs) [13, 14, 15, 16]. Compared to prompt-based multi-modal reasoning (e.g., instructing base model with \"Please think step-by-step\") [17, 18, 19, 20], MLRMs inherently encode reasoning process, demonstrating greater potential in alignment with human intentions; in contrast to asynchronous multi-modal reasoning paradigm (e.g., using one model to describe an", + "bbox": [ + 169, + 763, + 826, + 904 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.08813v1 [cs.LG] 9 Apr 2025", + "bbox": [ + 22, + 268, + 60, + 700 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author: xiangwang1223@gmail.com.", + "bbox": [ + 191, + 912, + 511, + 926 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "image for an LRM to reason) [21, 22, 23, 24], MLRMs are end-to-end models rather than cascaded pipelines, avoiding compounding errors and modality-specific information loss. These advantages position MLRM as a cornerstone for future advancements in AI reasoning [25].", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "However, alongside these reasoning advancements, model safety concerns have become increasingly critical [26, 27, 28], particularly the risks of unsafe content generation [29, 30]. Recent community efforts have evaluated the safety capabilities of mainstream LRMs like DeepSeek, especially their resilience to jailbreaking attacks [31, 32, 33]. These studies reveal urgent findings, demonstrating that exposure of reasoning processes amplifies safety risks, with attack success rates rise dramatically [34, 35]. While these findings motivate deeper investigations into LRM safety, the safety protocol of MLRMs remains underexplored. 
This gap demands imperative attention: multi-modal integration introduces novel attack vectors like cross-modal adversarial triggers, fundamentally expanding the threat surface beyond traditional text-only vulnerabilities [36, 37].", + "bbox": [ + 169, + 138, + 826, + 265 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This study conducts the first systematic safety evaluation of advancing MLRMs. To catalyze this, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for various models, datasets, and jailbreaking methods. With OpenSafeMLRM, we evaluate mainstream MLRMs such as R1-Onevision [38], MM-Eureka-Qwen [14], Mulberry-Qwen2VL [39], Mulberry-Llava [39] and Mulberry-Llama [39] across 10 canonical unsafe scenarios curated from MM-SafetyBench [37], and further adapt black-box jailbreaking methods traditionally used for MLLMs [36] for probing MLRMs' safety resilience. All evaluations are applied to both target MLRMs and their base MLLMs. This comparative framework allows us to quantify how reasoning capability acquisition impacts the safety protocols.", + "bbox": [ + 169, + 270, + 826, + 397 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Empirical results reveal several novel findings: (1) Reasoning Tax: The pursuit of advanced reasoning capabilities via supervised fine-tuning (SFT)/reinforcement learning (RL) imposes a steep safety cost — MLRMs exhibit $37.44\\%$ higher attack success rates than their base MLLMs, with safety alignment catastrophically eroded during capability enhancement. (2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer $25\\times$ higher attack rates — far exceeding the average $3.4\\times$ increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. 
(3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction: $16.23\\%$ of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards.", + "bbox": [ + 168, + 401, + 823, + 526 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "While our evaluation framework conduct comprehensive experiments, we acknowledge that selection bias in test samples and evaluation criteria design may inevitably introduce measurement distortions. To mitigate this, we will refine this evaluation framework by incorporating additional datasets, models, attack vectors, and defense baselines [36] to enhance its comprehensiveness and reliability. These findings reveal fundamental gaps in MLRMs' current safety mechanisms and expose the alarming efficacy of adversarial strategies in bypassing their safeguards. In summary, our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards.", + "bbox": [ + 169, + 532, + 826, + 643 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Experimental Setup", + "text_level": 1, + "bbox": [ + 171, + 667, + 374, + 686 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This work aims to systematically investigate the safety of MLRMs and their safety degradation compared to base MLLMs. Here, we formalize the research aim of this paper and provided specific experimental configurations, including datasets, baseline models and evaluation metrics.", + "bbox": [ + 169, + 700, + 823, + 743 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Research Aim", + "text_level": 1, + "bbox": [ + 197, + 757, + 295, + 771 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our aim is to explore the safety vulnerabilities in MLRMs. 
Through comprehensive analysis across various unsafe scenarios, we highlight key risks and compare the safety performance of MLRMs against their base MLLMs, providing valuable insights for enhancing their safety.", + "bbox": [ + 196, + 781, + 802, + 825 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Datasets & Jailbreaking Methods.", + "text_level": 1, + "bbox": [ + 171, + 854, + 450, + 871 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We evaluate our framework on two widely adopted benchmarks for MLLM safety: MM-SafetyBench [37] and SafetyBench [40].", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2fca32aaf7e8dcca1bb4035fe1b80f62456b537f4e8f4441fc1dd307ddd4e2bf.jpg", + "image_caption": [ + "Figure 1: Comparison of ASR and HR scores across various MLRMs and their base MLLMs under vanilla unsafe text queries. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 88, + 444, + 364 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/524be737f917bffa0d7a4defad3e62688344660ebd82d49f19e779fc51fd4ef3.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 450, + 95, + 602, + 363 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/73d5d2df663c13e44315c732c954870c81e665df6eee922f309b5ac748178580.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 95, + 692, + 363 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/e20ee32ac3bfeea3c5379d14e5dbc30e5c7bab73b7bd426f988e4f8b24d0ea6d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 95, + 813, + 363 + ], + "page_idx": 2 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- MM-SafetyBench comprises 13 safety-critical scenarios (e.g., illegal activities, hate speech) that MLLMs should strictly prohibit. Constructed via the QueryRelated pipeline [37], this dataset first generates malicious queries across scenarios, then uses GPT-4 [41] to extract unsafe keywords and synthesize three image types: (1) Stable Diffusion (SD) [42] images: Visually aligned with unsafe keywords. (2) Text-overlaid images: Unsafe text superimposed on benign visuals. (3) Hybrid images: SD-generated visuals combined with adversarial typography. Following [37], we adopt hybrid images (empirically shown to be the most effective jailbreaking method among the three) as the jailbreaking method for evaluation.", + "- SafetyBench covers 10 prohibited topics curated from the OpenAI and Meta's Llama-2 [43] usage policies. Built via the FigStep pipeline [40], it leverages GPT-4 [41] to rewrite queries into instructional formats (e.g., \"Steps to manufacture illegal drugs\"), enumerates them as numbered lists, and converts these into typographic images. 
These images are then fed to target models to complete missing steps — a second jailbreaking paradigm in our evaluation. To ensure cross-benchmark consistency, we retain the 10 overlapping safety topics between two datasets." + ], + "bbox": [ + 169, + 422, + 826, + 617 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 Baseline MLRMs", + "text_level": 1, + "bbox": [ + 171, + 643, + 334, + 657 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We evaluate five prominent open-source MLRMs spanning diverse base architectures and parameter scales: R1-OneVision [38], MM-EUREKA-Qwen [14], Mulberry-Qwen2-VL [39], Mulberry-LLaVA [39] and Mulberry-LlaMA [39]. These models exhibit R1-style reasoning capabilities through SFT and RL with human feedback alignment. Specifically, they generate intermediate reasoning steps before producing final answers, enhancing performance on complex multi-modal tasks but simultaneously amplifies safety risks due to extended reasoning pathways.", + "bbox": [ + 169, + 674, + 826, + 758 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3 Evaluation Metrics", + "text_level": 1, + "bbox": [ + 171, + 784, + 346, + 797 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We selected two evaluation metrics. The first is Harmfulness Rating (HR), which employs an additional LLM to assess the level of harm in the generated content. Following [44], we adopted a two-step evaluation to calculate HR: Initially, the output undergoes a substring lookup; if the model's response contains phrases like \"Sorry, I can not\", the HR is immediately set to 0. 
Next, a detailed prompt with specific scoring rules is designed to instruct GPT-4o-mini [45] to evaluate the harmfulness of the model's output according to the rules, using a 0-5 scale where 0 indicates no harmfulness, and 5 indicates extremely harmful content.", + "bbox": [ + 169, + 814, + 825, + 911 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/f2de661e7d249e4c61df89f13c907384e6f0622209aab49011406c1b6d9ff010.jpg", + "image_caption": [ + "Figure 2: Comparison of ASR and HR scores across MLRMs and their base MLLMs under jailbreak attacks. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color." + ], + "image_footnote": [], + "bbox": [ + 173, + 88, + 447, + 362 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/3b9bca542edeca217510c4d7e8e53b59bf563b76e85a29e386e4382336688c4d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 450, + 95, + 602, + 361 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/1fbee6b1ef5f8c6c74b3dc102f6621c06ea51843f6bf84e089618fe23a75e04a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 95, + 692, + 361 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/dff095cff933feba5a38f3aee9fb251ef563c1d002268138fcfd04d14cdacb37.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 699, + 94, + 813, + 361 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The second metric is Attack Success Rate (ASR), defined as the number of successful attacks divided by the total number of attempted attacks. 
Formally, for a particular instruction dataset $D$ , we define $ASR = I(Q_i) / |D|$ , where $Q_i$ represents the set of text and image queries, and $I(\\cdot)$ is an indicator function such that $I(\\cdot) = 1$ if the response engages with the malicious query, and $I(\\cdot) = 0$ otherwise. ASR is a common metric for evaluating model safety and robustness. In our experiments, we addressed a potential issue: directly using the LLM as a judge to determine if the model output has been \"jailbroken\" introduces considerable uncertainty. Therefore, based on the detailed guidelines in the aforementioned prompt, we define a jailbreak as successful when the HR score equals 4 or 5.", + "bbox": [ + 169, + 409, + 823, + 521 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Evaluation Dimensions: As there has been no comprehensive evaluation specifically targeting MLRMs, we define two key dimensions for assessment: Result Safety and Overall Safety. These are equally important, as different LRM models may present outcomes in varying formats — either as raw results or as reasoning alongside the results. In the following sections, we will exhibit and analyze the metrics for these two dimensions. Note that while we also report metrics related to Reasoning Safety for investigating the relationship between the safety of the reasoning process and the final output, we consider this secondary to the two primary dimensions.", + "bbox": [ + 169, + 532, + 823, + 630 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Safety Inspection of MLRMs", + "text_level": 1, + "bbox": [ + 171, + 655, + 447, + 672 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Reasoning Tax", + "text_level": 1, + "bbox": [ + 171, + 690, + 316, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Prior work has observed the \"safety tax\" in reasoning models, i.e., performance deterioration caused by safety alignment [35]. 
We identify a critical counterpart: the reasoning tax, defined as the safety degradation incurred when augmenting MLLMs with reasoning capabilities through SFT or RL. To quantify this phenomenon, we systematically compare safety metrics between four safety-aligned base MLLMs and five MLRMs derived from them.", + "bbox": [ + 169, + 718, + 823, + 787 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Experimental Protocol. To isolate the impact of reasoning capability acquisition, we adopt a two-stage evaluation: For base MLLMs, we directly assess the outputs for HR and ASR, as these models lack explicit reasoning step generation. For MLRMs, we collect HR and ASR across Think, Answer and Overall processes. Note that although we employed a standard prompt template to enforce the separation of the reasoning and answer processes, some MLRMs (e.g., Mulberry-LlaMA) still struggled to strictly follow the \"Think + Answer\" format in their outputs. For these models, we report their overall safety solely. Figure 1 and 2 exhibit the results under vanilla unsafe text queries and jailbreak attacks, respectively.", + "bbox": [ + 169, + 800, + 826, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/327d55b58bbda774ffb1eec6ecd34158a8773e8c4f874654e783879ab6338cd3.jpg", + "table_caption": [ + "Table 1: Comparison of HR scores on MM-SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the \"Model\" column represents the base MLLMs." + ], + "table_footnote": [], + "table_body": "
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL[46]-Vani.Overall0.040.280.841.210.301.850.682.674.043.27
Jail.Overall2.482.734.253.833.404.113.683.154.224.18
R1-OV [38]Vani.Think3.102.972.633.493.403.673.103.453.624.23
Answer3.553.293.234.193.924.513.204.104.364.32
Overall3.033.072.703.993.484.403.183.724.354.28
Jail.Think4.123.624.274.354.143.904.123.203.844.20
Answer4.363.864.664.594.514.154.543.714.034.25
Overall4.313.914.614.604.374.324.333.504.224.21
Eureka-QW [14]Vani.Think1.951.973.203.452.704.002.903.333.933.54
Answer2.031.873.343.112.294.152.553.354.043.62
Overall2.232.013.363.532.654.222.973.233.993.90
Jail.Think3.403.124.004.243.903.913.793.194.013.74
Answer2.893.254.304.033.663.823.673.083.913.71
Overall3.393.454.254.243.974.054.013.254.113.91
QW2-VL [47]-Vani.Overall0.050.050.550.790.141.750.361.853.822.25
Jail.Overall3.663.294.344.224.274.394.063.104.053.88
MBerry-QW [39]Vani.Think2.122.342.573.063.122.962.632.573.282.83
Answer2.452.473.323.442.623.953.063.353.883.61
Overall2.502.443.393.552.754.103.033.654.083.82
Jail.Think3.272.873.023.583.452.973.472.382.492.91
Answer4.414.074.484.474.444.084.323.393.863.86
Overall4.484.194.364.574.604.334.453.554.083.99
LMA3-LVA [48]-Vani.Overall0.150.501.451.920.582.450.912.074.072.64
Jail.Overall3.773.154.054.224.053.763.963.323.803.85
MBerry-LVA [39]Vani.Overall1.953.364.413.393.743.802.603.323.993.55
Jail.Overall3.853.523.554.033.773.783.893.253.443.79
LMA-3.2-Vision [43]-Vani.Overall0.100.732.071.760.624.431.093.734.423.63
Jail.Overall0.761.662.181.961.883.592.373.044.173.84
MBerry-LMA [39]Vani.Overall0.100.641.111.310.623.000.763.834.294.23
Jail.Overall3.383.182.983.583.113.653.423.513.564.07
", + "bbox": [ + 181, + 138, + 823, + 657 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Key Findings. Figures 1 and 2 reveal systematic safety degradation across all MLRMs. Specifically,", + "bbox": [ + 171, + 667, + 823, + 683 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Overall Safety Collapse: MLRMs exhibit $31.30\\%$ higher ASR ( $59.52\\%$ vs. base MLLMs' $28.22\\%$ ) and 1.64 higher HR ( $3.07\\%$ vs. 1.43), demonstrating that exposing reasoning chains fundamentally expands attack surfaces.", + "- Persistent Answer Degradation: Even when ignoring unsafe reasoning steps, MLRM answers show $25.89\\%$ higher ASR and 1.30 higher HR compared to base models. This proves safety erosion persists beyond reasoning exposure, suggesting SFT/RL alignment damages intrinsic safeguards.", + "- Architectural Vulnerability: Qwen2.5-based MLRMs suffer catastrophic safety loss (ASR + 40.06%, HR + 2.02), suggesting architectural incompatibility between reasoning and safety mechanisms. Notably, MBerry-LMA exhibits an inverse trend: its safety metrics improve post-reasoning augmentation (ASR - 4.8%, HR - 0.21). We believe this anomaly hints at potential safeguards, and may provide a rare blueprint for designing safety-resilient reasoning models.", + "- Reasoning as Attack Vector: Unsafe reasoning occurs $12.52\\%$ more frequently than unsafe answers, highlighting systemic risks in exposing intermediate reasoning to users." + ], + "bbox": [ + 171, + 686, + 823, + 867 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/c06dc7b23f1c52b1a3bd45fbef39d0292edf091479515efa9369e608e17ed8ca.jpg", + "table_caption": [ + "Table 2: Comparison of HR scores on SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. 
refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the \"Model\" column represents the base MLLMs." + ], + "table_footnote": [], + "table_body": "
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL [46]-Vani.Overall0.220.420.000.180.501.960.743.943.923.70
Jail.Overall2.782.684.363.044.084.043.143.564.323.62
R1-OV [38]Vani.Think3.302.574.122.943.484.173.103.404.003.22
Answer4.003.193.983.433.944.593.393.763.944.18
Overall3.442.663.843.143.664.302.983.224.063.64
Jail.Think4.123.794.864.584.444.204.523.403.804.34
Answer4.673.884.914.604.514.384.583.673.874.02
Overall4.183.784.884.624.544.484.523.583.883.98
Eureka-QW [14]Vani.Think2.891.613.262.142.184.042.283.303.903.54
Answer2.501.583.021.941.864.142.123.143.643.50
Overall2.501.503.041.901.844.002.303.303.543.28
Jail.Think3.142.843.903.343.464.183.503.383.783.62
Answer3.262.984.123.603.604.143.663.263.723.36
Overall3.322.83.983.523.584.183.543.523.863.60
QW2-VL [47]-Vani.Overall0.280.340.280.060.062.240.763.083.923.68
Jail.Overall3.302.744.823.344.404.483.824.044.284.40
MBerry-QW [39]Vani.Think2.722.423.142.392.623.372.622.713.243.33
Answer2.962.473.452.682.964.102.843.803.653.92
Overall2.882.303.422.463.064.162.683.723.663.86
Jail.Think4.153.954.484.104.314.364.163.733.893.82
Answer4.464.084.804.384.564.664.403.564.004.20
Overall4.694.244.884.424.644.524.463.464.004.18
LMA3-LVA [48]-Vani.Overall0.660.842.260.861.082.601.203.323.863.58
Jail.Overall4.223.064.484.364.243.764.043.323.803.85
MBerry-LVA [39]Vani.Overall1.901.463.681.523.283.482.223.624.043.64
Jail.Overall4.383.564.424.083.944.043.843.383.383.58
LMA-3.2-Vision [43]-Vani.Overall1.781.361.200.721.684.301.763.904.404.00
Jail.Overall2.923.304.263.863.644.223.583.523.983.96
MBerry-LMA [39]Vani.Overall1.180.721.700.881.003.181.123.844.243.64
Jail.Overall3.964.044.744.524.484.463.943.903.984.20
", + "bbox": [ + 181, + 140, + 823, + 656 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Takeaway 3.1: Reasoning Tax in MLRMs", + "text_level": 1, + "bbox": [ + 197, + 666, + 473, + 681 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SFT/RL-based reasoning acquisition severely compromises safety alignment of base MLLMs, a phenomenon we term the \"reasoning tax\" in MLRMs.", + "bbox": [ + 196, + 691, + 800, + 722 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 Safety Blind Spots", + "text_level": 1, + "bbox": [ + 171, + 758, + 339, + 773 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct fine-grained analysis to uncover safety blind spots—scenarios where MLRMs exhibit catastrophic safety failures despite base MLLMs achieving near-perfect alignment. We ask: (1) Do MLRMs inherit base models' scenario-specific safety profiles? (2) Does the reasoning tax manifest heterogeneously across scenarios?", + "bbox": [ + 169, + 787, + 823, + 843 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Experimental Protocol. Following [37], we evaluate 10 safety-critical scenarios: Illegal Activity, Hate Speech, Malware Generation, Physical Harm, Fraud, Pornography, Privacy Violence, Legal Opinion, Financial Advice, and Consultation. 
Comparison of HR scores across various scenarios on MM-SafetyBench and SafetyBench datasets are exhibited in Table 1 and 2, respectively.", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/60217ca26d6c4706bc4f998ce4df4e04632607ededf903ac42f70a86fd91f47d.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 88, + 339, + 215 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/53e2c0898fa6c870825288302fce6eecd6766b8ee84334a014b1d11362a0eb5f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 351, + 89, + 495, + 215 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/57a25f7f7f4c6c142c2394afd0944b8dcc8625bffd742589d64785330fa33d91.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 90, + 651, + 215 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/96d0037c6b1f0c919e8f64e91c808aa868ab3b2d406f3824f52753c349cc06d0.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 663, + 90, + 821, + 215 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/574b87aa30ef2437f88de2908076b3e19b49865518778ec5150bd86129cf9bef.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 223, + 341, + 246 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/bc774ec0c51aac450e2ceec06bbe9b1f0ea58d88ce42c26e8fd04ed1ac78bfe8.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 348, + 223, + 500, + 246 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/04693c91aafe664ff917e72897465625489f7fd357aabc415b90861cb283f963.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 223, + 648, + 246 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": 
"images/4cdb68a77c41887d4e526d090e30fd1a2ff06614cb2e14b5073428ed1d868612.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 655, + 214, + 820, + 246 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6356fd790de32a30a73b6fbc40b822e226a13a741a8e1a8e4c327465cad11262.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 173, + 246, + 338, + 358 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/55de58692bb2a404a8a856a334e4abff7819345d1afe93f4fd164a052c994d15.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 348, + 246, + 496, + 358 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/149faa88859aa02e53597cc89337335a33b638b96b121b487ef6244ed69951a5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 506, + 246, + 651, + 358 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/65d048820b8a11abb71c8e4904d33e7f6a604b77a10e35895962f2dc0c344fad.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 655, + 246, + 821, + 358 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/7a14c4ceaa3d3d387e95528365cd5045cb9fb35a9973efd78da0869bd0346884.jpg", + "image_caption": [ + "Figure 3: Comparison of ASR scores across different MLRMs and their base MLLMs. For abbreviations, va. and ja. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. We use MB to denote MLRM that are developed with MBerry method." 
+ ], + "image_footnote": [], + "bbox": [ + 184, + 366, + 341, + 388 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/06be97e39e60553cc8f956de76996a567c71d1a7d7613291a13fc8c3397cbf66.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 346, + 366, + 500, + 388 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/6c4e282129807050626985bcb6911ef73c9400789bbd8f7d6f064b92e0437af5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 509, + 366, + 648, + 388 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/91406f4d686d264c572bc7d1bb07d6ec7d28811e9c5b704367ea8ea288d1c7f4.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 655, + 356, + 820, + 388 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Additionally, we provide a more intuitive display for ASR in the form of a radar chart, as shown in Figure 3.", + "bbox": [ + 169, + 448, + 823, + 477 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Key Findings. Experimental results reveal alarming safety blind spots. For instance, in I11legal Activity scenario, MLRMs show $3.79 \\times$ higher ASR than base MLLMs on average. Furthermore, while the base MLLM Qwen2.5-VL achieves near-perfect safety (ASR $< 3\\%$ ), its MLRM derivative R1-Onevision suffers catastrophic failure (ASR $> 50\\%$ ), achieving near $25 \\times$ degradation. This exposes catastrophic alignment erosion in reasoning-enhanced architectures. 
Overall, safety degradation varies dramatically across scenarios ( $\\Delta$ ASR range: $8.1\\% - 2500\\%$ ), with I11legal Activity/Pornography being most/least affected scenarios, demanding urgent scenario-specific red teaming and adaptive alignment protocols for MLRMs.", + "bbox": [ + 169, + 488, + 826, + 602 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Takeaway 3.2: Safety Blind Spots", + "text_level": 1, + "bbox": [ + 197, + 618, + 423, + 633 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "MLRMs introduce critical safety blind spots — scenarios where base MLLMs excel (e.g., Illegal Activity) become catastrophic vulnerabilities post-reasoning augmentation.", + "bbox": [ + 196, + 643, + 800, + 676 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "3.3 Emergent Self-Correction", + "text_level": 1, + "bbox": [ + 171, + 717, + 393, + 732 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we aim to analyze the relationship between thinking safety and answer safety. This helps uncover potential correlations in the safety resilience across different stages of MLRM outputs, providing insights for constructing hierarchical defense strategies.", + "bbox": [ + 169, + 746, + 826, + 789 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Experimental Protocol. We select three MLRMs (i.e., R1-OneVision, MM-EUREKA-Qwen, Mulberry-Qwen), all strictly following \"Think + Answer\" output formats. From the 10 safety-critical scenarios, we randomly sample 100 adversarial queries per scenario, forming a 1000-sample test set. For each query, we compute Think-HR and Answer-HR under jailbreaking and visualize their normalized joint distribution via 2D heatmaps, as exhibited in Figure 4.", + "bbox": [ + 169, + 800, + 826, + 872 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Key Findings. Figure 4 reveals strong symmetric coupling between Think-HR and Answer-HR. 
Specifically,", + "bbox": [ + 169, + 882, + 826, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0f6b250de89a5323a9368b9e1db8598f144284be977a86440193d4c19b46cf1a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 95, + 356, + 241 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b96251a71647018544dd68520a8308c9c7cfc9cc7569ff38fe7cb1d03bdbe385.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 380, + 95, + 560, + 241 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/98b5b441b4b4a884c9266ad7fc01d2d0e1969952264386748b5b2cf63ca2e5a6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 94, + 821, + 241 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/ffdc7b83ae60bf79a28deb1a926f46e94e6cc41907599645a0ec1bc82fd49706.jpg", + "image_caption": [ + "Figure 4: The relationship between reasoning safety and answer safety, where the horizontal and vertical axes represent HR scores. The numbers in the color blocks represent the normalized probabilities, with deeper colors indicating higher probabilities. Best viewed in color." 
+ ], + "image_footnote": [], + "bbox": [ + 176, + 258, + 356, + 404 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/feec6998f962b31d78f94fa1bbbcb11311f15d2b33513d67b19b41c78845006e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 382, + 258, + 560, + 404 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/4f65d845f53f43f7c10558a1e1c58dbf484417bc971c452cf7673bb398f6dcc5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 588, + 258, + 821, + 402 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Reasoning-Answer Coupling: When reasoning steps are compromised (Think-HR > 3), answer jailbreaking success rate reaches $90.9\\%$ , indicating that unsafe cognitive frameworks propagate to outputs. Conversely, $93.7\\%$ of compromised answers (Answer-HR > 3) originate from unsafe reasoning steps, suggesting output vulnerabilities necessitate but are not fully determined by reasoning flaws.", + "- Emergent Self-Correction: A subset of MLRMs (e.g., Mulberry-Qwen) exhibit right-upper quadrant clustering in heatmaps, where $12.4\\%$ of unsafe reasoning steps (Think-HR $>3$ ) yield safe answers (Answer-HR $\\leq 3$ ), exhibiting emergent self-correction capabilities in MLRMs. This nascent capability provides a foothold for layered defenses: hardening reasoning steps while amplifying innate safeguards." 
+ ], + "bbox": [ + 171, + 467, + 826, + 606 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Takeaway 3.3: Emergent Self-Correction", + "text_level": 1, + "bbox": [ + 197, + 616, + 467, + 631 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "MLRMs exhibit intrinsic self-correction: $12.4\\%$ of unsafe reasoning chains are overridden by safe answers, revealing preserved safeguards that reject harmful cognition.", + "bbox": [ + 196, + 643, + 799, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4 Related Work", + "text_level": 1, + "bbox": [ + 171, + 700, + 321, + 715 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Multi-modal Reasoning in Language Models. CoT reasoning process has proven effective for enhancing multi-modal capabilities in language models [25]. Initial approaches focused on prompt engineering such as constructing zero-shot prompts like \"think step-by-step to understand the given text and image inputs\" directly [17]. In contrast, the cascade paradigm achieved multi-modal reasoning asynchronously (e.g., using one model to describe an image for an LRM to reason) [24]. Reinforcement learning-based models such as LLaVA-Reasoner [9] and Insight-V [12] leveraged CoT datasets and direct preference optimization (DPO) with human feedback to acquire intrinsic reasoning skills. The success of DeepSeek-R1 further catalyzed this trend, inspiring derivative architectures such as Easy-R1 [49], R1-OneVision [50], Multimodal-Open-R1 [13], R1-V [51], VLM-R1 [10] and LMM-R1 [52]. Notably, process reward models (PRMs) like MSTaR [241] and VisualPRM [242] represent a paradigm shift from outcome-focused reward models (ORMs). 
By providing stepwise feedback during reasoning, PRMs enhance self-consistency and enable iterative self-improvement in MLLMs [25].", + "bbox": [ + 169, + 731, + 826, + 910 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Safety Exploration of LRMs. The rapid advancement of LRMs has prompted growing attention to their safety risks. Recent work systematically evaluates mainstream LRMs across multiple dimensions [28, 35, 32, 33]. For example, [53] reveals significant gaps between open-source R1 models and commercial counterparts like o3-mini, while [54] verifies that DeepSeek-R1 failed to successfully block most harmful prompts, exposing critical vulnerabilities under adversarial conditions. Works by [34, 27, 55, 26, 30] further probe the safety boundaries of the LRM and identify their failure modes. Concurrently, researchers have begun investigating attack and defense paradigms specific to LRMs — [29] demonstrates that LRMs are uniquely vulnerable to fine-tuning attacks and [31] critiques the limitations of RL-based safety alignment (e.g., reward hacking, generalization failures) in mitigating harmful outputs. However, these efforts focus narrowly on unimodal LRMs, leaving the safety implications of MLRMs largely unexplored — a critical gap given the distinct risks introduced by cross-modal interactions inherent to MLRM architectures.", + "bbox": [ + 169, + 90, + 826, + 257 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 276, + 302, + 292 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "The rapid integration of reasoning capabilities into MLLMs has birthed powerful MLRMs with transformative potential. However, our systematic evaluation reveals that this advancement comes at a profound cost to safety. 
Through large-scale empirical studies comparing MLRMs with their base MLLMs, we uncover three critical insights: (1) A reasoning tax: MLRMs suffer $37.44\\%$ higher jailbreaking rates than base MLLMs due to eroded safety alignment; (2) Scenario-specific blind spots: risks spike $25\\times$ in contexts like Illegal Activity, far exceeding average degradation; (3) Emergent self-correction: Despite tight reasoning- answer safety coupling, MLRMs demonstrate nascent self-correction. That is, $16.23\\%$ of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. To catalyze mitigation, we release OpenSafeMLRM, an open-source toolkit with unified interface for evaluating mainstream MLRMs, datasets, and attacks. These findings mandate urgent efforts to govern how multi-modal AI reasons ensuring capability advances align with ethical imperatives.", + "bbox": [ + 169, + 306, + 826, + 474 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 171, + 492, + 267, + 508 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. 
Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, and S. S. Li. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. CoRR, abs/2501.12948, 2025.", + "[2] DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. 
Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu" + ], + "bbox": [ + 179, + 515, + 826, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, and Wangding Zeng. Deepseek-v3 technical report. CoRR, abs/2412.19437, 2024.", + "[3] OpenAI. Learning to reason with LLMs. 2024.", + "[4] Edward Y. Chang, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms. CoRR, abs/2502.03373, 2025.", + "[5] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024.", + "[6] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "[7] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. R1-vl: Learning to reason with multimodal large language models via stepwise group relative policy optimization. 2025. arXiv preprint arXiv:2503.12397.", + "[8] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's \"aha moment\" in visual reasoning on a 2b non-sft model. 2025.", + "[9] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. 
arXiv preprint arXiv:2410.16198, 2024.", + "[10] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. 2025. Accessed: 2025-02-15.", + "[11] Liu Yuqi, Peng Bohao, Zhong Zhisheng, Yue Zihao, Lu Fanbin, Yu Bei, and Jia Jiaya. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. 2025.", + "[12] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024.", + "[13] EvolvingLMMs Lab. Multimodal open r1. GitHub repository, 2025. Accessed: 2025-02-28.", + "[14] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfen Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. 2025.", + "[15] Zhangquan Chen, Xufang Luo, and Dongsheng Li. Visrl: Intention-driven visual perception via reinforced reasoning. 2025. arXiv preprint arXiv:2503.07523.", + "[16] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025.", + "[17] Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, and William Yang Wang. Let's think frame by frame with VIP: A video infilling and prediction dataset for evaluating video chain-of-thought. arXiv preprint arXiv:2305.13903, 2023.", + "[18] Fanxu Meng, Haotong Yang, Yiding Wang, and Muhan Zhang. Chain of images for intuitively reasoning. arXiv preprint arXiv:2311.09241, 2023.", + "[19] Jingran Xie, Shun Lei, Yue Yu, Yang Xiang, Hui Wang, Xixin Wu, and Zhiyong Wu. Leveraging chain of thought towards empathetic spoken dialogue without corresponding question answering data. 
arXiv preprint arXiv:2501.10937, 2025." + ], + "bbox": [ + 171, + 90, + 825, + 912 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[20] Xuewen Luo, Fan Ding, Yinsheng Song, Xiaofeng Zhang, and Junnyong Loo. Pkrd-cot: A unified chain-of-thought prompting for multi-modal large language models in autonomous driving. arXiv preprint arXiv:2412.02025, 2024.", + "[21] Haojie Zheng, Tianyang Xu, Hanchi Sun, Shu Pu, Ruoxi Chen, and Lichao Sun. Thinking before looking: Improving multimodal llm reasoning via mitigating visual hallucination. arXiv preprint arXiv:2411.12591, 2024.", + "[22] Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, et al. Cantor: Inspiring multimodal chain-of-thought of mllm. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 9096-9105, 2024.", + "[23] Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024.", + "[24] Bozhi Luan, Hao Feng, Hong Chen, Yonghui Wang, Wengang Zhou, and Houqiang Li. Textcot: Zoom in for enhanced multimodal text-rich image understanding. arXiv preprint arXiv:2404.09797, 2024.", + "[25] Yaoting Wang, Shengqiong Wu, Yuecheng Zhang, William Wang, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025.", + "[26] Chatterbox. Gen ai safety model comparison and reasoning results, 2025.", + "[27] Adversa.ai. 
Introducing safety aligned deepseek r1 model by enkrypt ai, 2025.", + "[28] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in chinese contexts. arXiv preprint arXiv:2502.11137, 2025.", + "[29] Zhiyuan Xu, Joseph Gardiner, and Sana Belguith. The dark deep side of deepseek: Fine-tuning attacks against the safety alignment of cot-enabled models. arXiv preprint arXiv:2502.01225, 2025.", + "[30] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. arXiv preprint arXiv:2502.04040, 2025.", + "[31] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025.", + "[32] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, page arXiv-2502, February 2025.", + "[33] Qiguang Chen, Libo Qin, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024.", + "[34] Zonghao Ying, Guangyi Zheng, Yongxin Huang, Deyue Zhang, Wenxin Zhang, Quanchen Zou, Aishan Liu, Xianglong Liu, and Dacheng Tao. Towards understanding the safety boundaries of deepseek models: Evaluation and findings. arXiv preprint arXiv:2503.15092, 2025.", + "[35] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. 
arXiv preprint arXiv:2503.00555, 2025.", + "[36] Mang Ye, Xuankun Rong, Wenke Huang, Bo Du, Nenghai Yu, and Dacheng Tao. A survey of safety on large vision-language models: Attacks, defenses and evaluations. CoRR, abs/2502.14881, 2025." + ], + "bbox": [ + 173, + 90, + 825, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[37] Xin Liu, Yichen Zhu, Jindong Gu, Yunshi Lan, Chao Yang, and Yu Qiao. Mm-safetybench: A benchmark for safety evaluation of multimodal large language models. In ECCV (56), volume 15114 of Lecture Notes in Computer Science, pages 386-403. Springer, 2024.", + "[38] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, Bo Zhang, and Wei Chen. R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025.", + "[39] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024.", + "[40] Yichen Gong, Delong Ran, Jinyuan Liu, Conglei Wang, Tianshuo Cong, Anyu Wang, Sisi Duan, and Xiaoyun Wang. Figstep: Jailbreaking large vision-language models via typographic visual prompts. arXiv preprint arXiv:2311.05608, 2023.", + "[41] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023.", + "[42] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. 
In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022.", + "[43] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Jasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.", + "[44] Yu Wang, Xiaofei Zhou, Yichen Wang, Geyuan Zhang, and Tianxing He. Jailbreak large visual language models through multi-modal linkage. arXiv preprint arXiv:2412.00473, 2024.", + "[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024.", + "[46] Qwen Team. Qwen2.5-vl, January 2025.", + "[47] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024.", + "[48] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-last: Stronger llms supercharge multimodal capabilities in the wild, May 2024.", + "[49] Zheng Yaowei, Lu Junting, Wang Shenzhi, Feng Zhangchi, Kuang Dongdong, and Xiong Yuwen. Easyr1: An efficient, scalable, multi-modality rl training framework. 2025.", + "[50] Yangyi VAI. R1-onevision: Open-source multimodal large language model with reasoning. Online, 2025. Available at: https://yangyi-vali.notion.site/r1-onevision#198b1e4047f780c78306fb451be7160d.", + "[51] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. 2025. 
Accessed: 2025-02-02.", + "[52] Peng Yingzhe, Zhang Gongrui, Zhang Miaosen, You Zhiyuan, Liu Jie, Zhu Qipeng, Yang Kai, Xu Xingzhong, Geng Xin, and Yang Xu. Lmm-r1: Empowering 3b lmm's with strong reasoning abilities through two-stage rule-based rl. 2025." + ], + "bbox": [ + 173, + 90, + 826, + 911 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[53] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. O3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025.", + "[54] Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakhaaee, Nathaniel Li, Steven Basart, Bo Li, et al. Harmbench: A standardized evaluation framework for automated red teaming and robust refusal. arXiv preprint arXiv:2402.04249, 2024.", + "[55] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025." 
+ ], + "bbox": [ + 173, + 90, + 825, + 234 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + } +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_model.json b/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_model.json new file mode 100644 index 0000000000000000000000000000000000000000..5941efa999a1ac1608f418ba4bb901b2eeafff05 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_model.json @@ -0,0 +1,1975 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.27, + 0.061, + 0.701 + ], + "angle": 270, + "content": "arXiv:2504.08813v1 [cs.LG] 9 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.201, + 0.122, + 0.797, + 0.174 + ], + "angle": 0, + "content": "SafeMLRM: Demystifying Safety in Multi-modal Large Reasoning Models" + }, + { + "type": "text", + "bbox": [ + 0.29, + 0.225, + 0.707, + 0.256 + ], + "angle": 0, + "content": "Junfeng Fang\\(^{1}\\), Yukai Wang\\(^{1}\\), Ruipeng Wang\\(^{2}\\), Zijun Yao\\(^{1}\\), Kun Wang\\(^{3}\\), An Zhang\\(^{2}\\), Xiang Wang\\(^{2*}\\), Tat-Seng Chua\\(^{1}\\)" + }, + { + "type": "text", + "bbox": [ + 0.225, + 0.256, + 0.776, + 0.298 + ], + "angle": 0, + "content": "\\(^{1}\\)National University of Singapore, \\(^{2}\\)University of Science and Technology of China \n\\(^{3}\\)Nanyang Technological University \nfangjf1997@gmail.com" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.334, + 0.538, + 0.35 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.365, + 0.768, + 0.683 + ], + "angle": 0, + "content": "The rapid advancement of multi-modal large reasoning models (MLRMs) — enhanced versions of multimodal language models (MLLMs) equipped with reasoning capabilities — has revolutionized diverse applications. However, their safety implications remain underexplored. 
While prior work has exposed critical vulnerabilities in unimodal reasoning models, MLRMs introduce distinct risks from cross-modal reasoning pathways. This work presents the first systematic safety analysis of MLRMs through large-scale empirical studies comparing MLRMs with their base MLLMs. Our experiments reveal three critical findings: (1) The Reasoning Tax: Acquiring reasoning capabilities catastrophically degrades inherited safety alignment. MLRMs exhibit \\(37.44\\%\\) higher jailbreaking success rates than base MLLMs under adversarial attacks. (2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer \\(25\\times\\) higher attack rates — far exceeding the average \\(3.4\\times\\) increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. (3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction — \\(16.9\\%\\) of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. These findings underscore the urgency of scenario-aware safety auditing and mechanisms to amplify MLRMs' self-correction potential. To catalyze research, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for mainstream models, datasets, and jailbreaking methods. Our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards." 
+ }, + { + "type": "image", + "bbox": [ + 0.268, + 0.692, + 0.29, + 0.708 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.291, + 0.694, + 0.393, + 0.708 + ], + "angle": 0, + "content": "OpenSafeMLRM" + }, + { + "type": "text", + "bbox": [ + 0.408, + 0.694, + 0.729, + 0.708 + ], + "angle": 0, + "content": "https://github.com/fangjf1/OpenSafeMLRM" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.734, + 0.314, + 0.75 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.765, + 0.827, + 0.905 + ], + "angle": 0, + "content": "With the rapid advancement of AI, large reasoning models (LRMs) like the DeepSeek series have gained significant attention [1, 2, 3, 4]. These models excel in performing complex tasks through meticulous reasoning, enabling transformative impacts across various downstream domains [5, 6]. Recently, this influence has expanded to multi-modal applications [7, 8, 9]. By embedding chain-of-thought data during the fine-tuning or reinforcement learning phrase of base multi-modal large language models (MLLMs) [10, 11, 12], researchers have developed Multi-modal Large Reasoning Models (MLRMs) [13, 14, 15, 16]. Compared to prompt-based multi-modal reasoning (e.g., instructing base model with \"Please think step-by-step\") [17, 18, 19, 20], MLRMs inherently encode reasoning process, demonstrating greater potential in alignment with human intentions; in contrast to asynchronous multi-modal reasoning paradigm (e.g., using one model to describe an" + }, + { + "type": "page_footnote", + "bbox": [ + 0.192, + 0.913, + 0.513, + 0.927 + ], + "angle": 0, + "content": "*Corresponding author: xiangwang1223@gmail.com." 
+ } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "image for an LRM to reason) [21, 22, 23, 24], MLRMs are end-to-end models rather than cascaded pipelines, avoiding compounding errors and modality-specific information loss. These advantages position MLRM as a cornerstone for future advancements in AI reasoning [25]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.14, + 0.827, + 0.266 + ], + "angle": 0, + "content": "However, alongside these reasoning advancements, model safety concerns have become increasingly critical [26, 27, 28], particularly the risks of unsafe content generation [29, 30]. Recent community efforts have evaluated the safety capabilities of mainstream LRMs like DeepSeek, especially their resilience to jailbreaking attacks [31, 32, 33]. These studies reveal urgent findings, demonstrating that exposure of reasoning processes amplifies safety risks, with attack success rates rise dramatically [34, 35]. While these findings motivate deeper investigations into LRM safety, the safety protocol of MLRMs remains underexplored. This gap demands imperative attention: multi-modal integration introduces novel attack vectors like cross-modal adversarial triggers, fundamentally expanding the threat surface beyond traditional text-only vulnerabilities [36, 37]." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.271, + 0.828, + 0.398 + ], + "angle": 0, + "content": "This study conducts the first systematic safety evaluation of advancing MLRMs. To catalyze this, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for various models, datasets, and jailbreaking methods. 
With OpenSafeMLRM, we evaluate mainstream MLRMs such as R1-Onevision [38], MM-Eureka-Qwen [14], Mulberry-Qwen2VL [39], Mulberry-Llava [39] and Mulberry-Llama [39] across 10 canonical unsafe scenarios curated from MM-SafetyBench [37], and further adapt black-box jailbreaking methods traditionally used for MLLMs [36] for probing MLRMs' safety resilience. All evaluations are applied to both target MLRMs and their base MLLMs. This comparative framework allows us to quantify how reasoning capability acquisition impacts the safety protocols." + }, + { + "type": "text", + "bbox": [ + 0.169, + 0.402, + 0.825, + 0.527 + ], + "angle": 0, + "content": "Empirical results reveal several novel findings: (1) Reasoning Tax: The pursuit of advanced reasoning capabilities via supervised fine-tuning (SFT)/reinforcement learning (RL) imposes a steep safety cost — MLRMs exhibit \\(37.44\\%\\) higher attack success rates than their base MLLMs, with safety alignment catastrophically eroded during capability enhancement. (2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer \\(25\\times\\) higher attack rates — far exceeding the average \\(3.4\\times\\) increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. (3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction: \\(16.23\\%\\) of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.828, + 0.644 + ], + "angle": 0, + "content": "While our evaluation framework conduct comprehensive experiments, we acknowledge that selection bias in test samples and evaluation criteria design may inevitably introduce measurement distortions. 
To mitigate this, we will refine this evaluation framework by incorporating additional datasets, models, attack vectors, and defense baselines [36] to enhance its comprehensiveness and reliability. These findings reveal fundamental gaps in MLRMs' current safety mechanisms and expose the alarming efficacy of adversarial strategies in bypassing their safeguards. In summary, our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.668, + 0.375, + 0.687 + ], + "angle": 0, + "content": "2 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.702, + 0.825, + 0.744 + ], + "angle": 0, + "content": "This work aims to systematically investigate the safety of MLRMs and their safety degradation compared to base MLLMs. Here, we formalize the research aim of this paper and provided specific experimental configurations, including datasets, baseline models and evaluation metrics." + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.758, + 0.296, + 0.772 + ], + "angle": 0, + "content": "Research Aim" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.782, + 0.803, + 0.826 + ], + "angle": 0, + "content": "Our aim is to explore the safety vulnerabilities in MLRMs. Through comprehensive analysis across various unsafe scenarios, we highlight key risks and compare the safety performance of MLRMs against their base MLLMs, providing valuable insights for enhancing their safety." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.856, + 0.452, + 0.872 + ], + "angle": 0, + "content": "2.1 Datasets & Jailbreaking Methods." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "We evaluate our framework on two widely adopted benchmarks for MLLM safety: MM-SafetyBench [37] and SafetyBench [40]." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.089, + 0.446, + 0.365 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.452, + 0.097, + 0.603, + 0.364 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.097, + 0.693, + 0.364 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.096, + 0.815, + 0.364 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.17, + 0.371, + 0.825, + 0.414 + ], + "angle": 0, + "content": "Figure 1: Comparison of ASR and HR scores across various MLRMs and their base MLLMs under vanilla unsafe text queries. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.424, + 0.825, + 0.534 + ], + "angle": 0, + "content": "- MM-SafetyBench comprises 13 safety-critical scenarios (e.g., illegal activities, hate speech) that MLLMs should strictly prohibit. Constructed via the QueryRelated pipeline [37], this dataset first generates malicious queries across scenarios, then uses GPT-4 [41] to extract unsafe keywords and synthesize three image types: (1) Stable Diffusion (SD) [42] images: Visually aligned with unsafe keywords. (2) Text-overlaid images: Unsafe text superimposed on benign visuals. (3) Hybrid images: SD-generated visuals combined with adversarial typography. Following [37], we adopt hybrid images (empirically shown to be the most effective jailbreaking method among the three) as the jailbreaking method for evaluation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.535, + 0.827, + 0.618 + ], + "angle": 0, + "content": "- SafetyBench covers 10 prohibited topics curated from the OpenAI and Meta's Llama-2 [43] usage policies. 
Built via the FigStep pipeline [40], it leverages GPT-4 [41] to rewrite queries into instructional formats (e.g., \"Steps to manufacture illegal drugs\"), enumerates them as numbered lists, and converts these into typographic images. These images are then fed to target models to complete missing steps — a second jailbreaking paradigm in our evaluation. To ensure cross-benchmark consistency, we retain the 10 overlapping safety topics between two datasets." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.424, + 0.827, + 0.618 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.645, + 0.336, + 0.658 + ], + "angle": 0, + "content": "2.2 Baseline MLRMs" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.675, + 0.827, + 0.759 + ], + "angle": 0, + "content": "We evaluate five prominent open-source MLRMs spanning diverse base architectures and parameter scales: R1-OneVision [38], MM-EUREKA-Qwen [14], Mulberry-Qwen2-VL [39], Mulberry-LLaVA [39] and Mulberry-LlaMA [39]. These models exhibit R1-style reasoning capabilities through SFT and RL with human feedback alignment. Specifically, they generate intermediate reasoning steps before producing final answers, enhancing performance on complex multi-modal tasks but simultaneously amplifies safety risks due to extended reasoning pathways." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.785, + 0.347, + 0.799 + ], + "angle": 0, + "content": "2.3 Evaluation Metrics" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.826, + 0.912 + ], + "angle": 0, + "content": "We selected two evaluation metrics. The first is Harmfulness Rating (HR), which employs an additional LLM to assess the level of harm in the generated content. Following [44], we adopted a two-step evaluation to calculate HR: Initially, the output undergoes a substring lookup; if the model's response contains phrases like \"Sorry, I can not\", the HR is immediately set to 0. 
Next, a detailed prompt with specific scoring rules is designed to instruct GPT-4o-mini [45] to evaluate the harmfulness of the model's output according to the rules, using a 0-5 scale where 0 indicates no harmfulness, and 5 indicates extremely harmful content." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.089, + 0.449, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.452, + 0.097, + 0.603, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.097, + 0.693, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.7, + 0.095, + 0.815, + 0.362 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.37, + 0.825, + 0.4 + ], + "angle": 0, + "content": "Figure 2: Comparison of ASR and HR scores across MLRMs and their base MLLMs under jailbreak attacks. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.41, + 0.825, + 0.522 + ], + "angle": 0, + "content": "The second metric is Attack Success Rate (ASR), defined as the number of successful attacks divided by the total number of attempted attacks. Formally, for a particular instruction dataset \\( D \\), we define \\( ASR = I(Q_i) / |D| \\), where \\( Q_i \\) represents the set of text and image queries, and \\( I(\\cdot) \\) is an indicator function such that \\( I(\\cdot) = 1 \\) if the response engages with the malicious query, and \\( I(\\cdot) = 0 \\) otherwise. ASR is a common metric for evaluating model safety and robustness. In our experiments, we addressed a potential issue: directly using the LLM as a judge to determine if the model output has been \"jailbroken\" introduces considerable uncertainty. 
Therefore, based on the detailed guidelines in the aforementioned prompt, we define a jailbreak as successful when the HR score equals 4 or 5." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.533, + 0.825, + 0.631 + ], + "angle": 0, + "content": "Evaluation Dimensions: As there has been no comprehensive evaluation specifically targeting MLRMs, we define two key dimensions for assessment: Result Safety and Overall Safety. These are equally important, as different LRM models may present outcomes in varying formats — either as raw results or as reasoning alongside the results. In the following sections, we will exhibit and analyze the metrics for these two dimensions. Note that while we also report metrics related to Reasoning Safety for investigating the relationship between the safety of the reasoning process and the final output, we consider this secondary to the two primary dimensions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.656, + 0.449, + 0.673 + ], + "angle": 0, + "content": "3 Safety Inspection of MLRMs" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.691, + 0.317, + 0.706 + ], + "angle": 0, + "content": "3.1 Reasoning Tax" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.719, + 0.825, + 0.789 + ], + "angle": 0, + "content": "Prior work has observed the \"safety tax\" in reasoning models, i.e., performance deterioration caused by safety alignment [35]. We identify a critical counterpart: the reasoning tax, defined as the safety degradation incurred when augmenting MLLMs with reasoning capabilities through SFT or RL. To quantify this phenomenon, we systematically compare safety metrics between four safety-aligned base MLLMs and five MLRMs derived from them." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Experimental Protocol. 
To isolate the impact of reasoning capability acquisition, we adopt a two-stage evaluation: For base MLLMs, we directly assess the outputs for HR and ASR, as these models lack explicit reasoning step generation. For MLRMs, we collect HR and ASR across Think, Answer and Overall processes. Note that although we employed a standard prompt template to enforce the separation of the reasoning and answer processes, some MLRMs (e.g., Mulberry-LlaMA) still struggled to strictly follow the \"Think + Answer\" format in their outputs. For these models, we report their overall safety solely. Figure 1 and 2 exhibit the results under vanilla unsafe text queries and jailbreak attacks, respectively." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.172, + 0.098, + 0.825, + 0.14 + ], + "angle": 0, + "content": "Table 1: Comparison of HR scores on MM-SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the \"Model\" column represents the base MLLMs." + }, + { + "type": "table", + "bbox": [ + 0.182, + 0.14, + 0.825, + 0.659 + ], + "angle": 0, + "content": "
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL[46]-Vani.Overall0.040.280.841.210.301.850.682.674.043.27
Jail.Overall2.482.734.253.833.404.113.683.154.224.18
R1-OV [38]Vani.Think3.102.972.633.493.403.673.103.453.624.23
Answer3.553.293.234.193.924.513.204.104.364.32
Overall3.033.072.703.993.484.403.183.724.354.28
Jail.Think4.123.624.274.354.143.904.123.203.844.20
Answer4.363.864.664.594.514.154.543.714.034.25
Overall4.313.914.614.604.374.324.333.504.224.21
Eureka-QW [14]Vani.Think1.951.973.203.452.704.002.903.333.933.54
Answer2.031.873.343.112.294.152.553.354.043.62
Overall2.232.013.363.532.654.222.973.233.993.90
Jail.Think3.403.124.004.243.903.913.793.194.013.74
Answer2.893.254.304.033.663.823.673.083.913.71
Overall3.393.454.254.243.974.054.013.254.113.91
QW2-VL [47]-Vani.Overall0.050.050.550.790.141.750.361.853.822.25
Jail.Overall3.663.294.344.224.274.394.063.104.053.88
MBerry-QW [39]Vani.Think2.122.342.573.063.122.962.632.573.282.83
Answer2.452.473.323.442.623.953.063.353.883.61
Overall2.502.443.393.552.754.103.033.654.083.82
Jail.Think3.272.873.023.583.452.973.472.382.492.91
Answer4.414.074.484.474.444.084.323.393.863.86
Overall4.484.194.364.574.604.334.453.554.083.99
LMA3-LVA [48]-Vani.Overall0.150.501.451.920.582.450.912.074.072.64
Jail.Overall3.773.154.054.224.053.763.963.323.803.85
MBerry-LVA [39]Vani.Overall1.953.364.413.393.743.802.603.323.993.55
Jail.Overall3.853.523.554.033.773.783.893.253.443.79
LMA-3.2-Vision [43]-Vani.Overall0.100.732.071.760.624.431.093.734.423.63
Jail.Overall0.761.662.181.961.883.592.373.044.173.84
MBerry-LMA [39]Vani.Overall0.100.641.111.310.623.000.763.834.294.23
Jail.Overall3.383.182.983.583.113.653.423.513.564.07
" + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.668, + 0.825, + 0.684 + ], + "angle": 0, + "content": "Key Findings. Figures 1 and 2 reveal systematic safety degradation across all MLRMs. Specifically," + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.688, + 0.825, + 0.729 + ], + "angle": 0, + "content": "- Overall Safety Collapse: MLRMs exhibit \\(31.30\\%\\) higher ASR (\\(59.52\\%\\) vs. base MLLMs' \\(28.22\\%\\)) and 1.64 higher HR (\\(3.07\\%\\) vs. 1.43), demonstrating that exposing reasoning chains fundamentally expands attack surfaces." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.731, + 0.825, + 0.771 + ], + "angle": 0, + "content": "- Persistent Answer Degradation: Even when ignoring unsafe reasoning steps, MLRM answers show \\(25.89\\%\\) higher ASR and 1.30 higher HR compared to base models. This proves safety erosion persists beyond reasoning exposure, suggesting SFT/RL alignment damages intrinsic safeguards." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.772, + 0.825, + 0.84 + ], + "angle": 0, + "content": "- Architectural Vulnerability: Qwen2.5-based MLRMs suffer catastrophic safety loss (ASR + 40.06%, HR + 2.02), suggesting architectural incompatibility between reasoning and safety mechanisms. Notably, MBerry-LMA exhibits an inverse trend: its safety metrics improve post-reasoning augmentation (ASR - 4.8%, HR - 0.21). We believe this anomaly hints at potential safeguards, and may provide a rare blueprint for designing safety-resilient reasoning models." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.841, + 0.825, + 0.868 + ], + "angle": 0, + "content": "- Reasoning as Attack Vector: Unsafe reasoning occurs \\(12.52\\%\\) more frequently than unsafe answers, highlighting systemic risks in exposing intermediate reasoning to users." 
+ }, + { + "type": "list", + "bbox": [ + 0.172, + 0.688, + 0.825, + 0.868 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.098, + 0.825, + 0.14 + ], + "angle": 0, + "content": "Table 2: Comparison of HR scores on SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the \"Model\" column represents the base MLLMs." + }, + { + "type": "table", + "bbox": [ + 0.182, + 0.141, + 0.825, + 0.657 + ], + "angle": 0, + "content": "
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL [46]-Vani.Overall0.220.420.000.180.501.960.743.943.923.70
Jail.Overall2.782.684.363.044.084.043.143.564.323.62
R1-OV [38]Vani.Think3.302.574.122.943.484.173.103.404.003.22
Answer4.003.193.983.433.944.593.393.763.944.18
Overall3.442.663.843.143.664.302.983.224.063.64
Jail.Think4.123.794.864.584.444.204.523.403.804.34
Answer4.673.884.914.604.514.384.583.673.874.02
Overall4.183.784.884.624.544.484.523.583.883.98
Eureka-QW [14]Vani.Think2.891.613.262.142.184.042.283.303.903.54
Answer2.501.583.021.941.864.142.123.143.643.50
Overall2.501.503.041.901.844.002.303.303.543.28
Jail.Think3.142.843.903.343.464.183.503.383.783.62
Answer3.262.984.123.603.604.143.663.263.723.36
Overall3.322.83.983.523.584.183.543.523.863.60
QW2-VL [47]-Vani.Overall0.280.340.280.060.062.240.763.083.923.68
Jail.Overall3.302.744.823.344.404.483.824.044.284.40
MBerry-QW [39]Vani.Think2.722.423.142.392.623.372.622.713.243.33
Answer2.962.473.452.682.964.102.843.803.653.92
Overall2.882.303.422.463.064.162.683.723.663.86
Jail.Think4.153.954.484.104.314.364.163.733.893.82
Answer4.464.084.804.384.564.664.403.564.004.20
Overall4.694.244.884.424.644.524.463.464.004.18
LMA3-LVA [48]-Vani.Overall0.660.842.260.861.082.601.203.323.863.58
Jail.Overall4.223.064.484.364.243.764.043.323.803.85
MBerry-LVA [39]Vani.Overall1.901.463.681.523.283.482.223.624.043.64
Jail.Overall4.383.564.424.083.944.043.843.383.383.58
LMA-3.2-Vision [43]-Vani.Overall1.781.361.200.721.684.301.763.904.404.00
Jail.Overall2.923.304.263.863.644.223.583.523.983.96
MBerry-LMA [39]Vani.Overall1.180.721.700.881.003.181.123.844.243.64
Jail.Overall3.964.044.744.524.484.463.943.903.984.20
" + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.667, + 0.474, + 0.682 + ], + "angle": 0, + "content": "Takeaway 3.1: Reasoning Tax in MLRMs" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.693, + 0.801, + 0.723 + ], + "angle": 0, + "content": "SFT/RL-based reasoning acquisition severely compromises safety alignment of base MLLMs, a phenomenon we term the \"reasoning tax\" in MLRMs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.76, + 0.341, + 0.775 + ], + "angle": 0, + "content": "3.2 Safety Blind Spots" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.825, + 0.844 + ], + "angle": 0, + "content": "We conduct fine-grained analysis to uncover safety blind spots—scenarios where MLRMs exhibit catastrophic safety failures despite base MLLMs achieving near-perfect alignment. We ask: (1) Do MLRMs inherit base models' scenario-specific safety profiles? (2) Does the reasoning tax manifest heterogeneously across scenarios?" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Experimental Protocol. Following [37], we evaluate 10 safety-critical scenarios: Illegal Activity, Hate Speech, Malware Generation, Physical Harm, Fraud, Pornography, Privacy Violence, Legal Opinion, Financial Advice, and Consultation. Comparison of HR scores across various scenarios on MM-SafetyBench and SafetyBench datasets are exhibited in Table 1 and 2, respectively." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.174, + 0.089, + 0.34, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.352, + 0.09, + 0.496, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.091, + 0.653, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.665, + 0.091, + 0.822, + 0.216 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.224, + 0.343, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.224, + 0.5, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.224, + 0.65, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.656, + 0.215, + 0.821, + 0.247 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.247, + 0.339, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.349, + 0.247, + 0.498, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.508, + 0.247, + 0.653, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.656, + 0.247, + 0.822, + 0.359 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.367, + 0.343, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.348, + 0.367, + 0.5, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.511, + 0.367, + 0.65, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.656, + 0.357, + 0.821, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.396, + 0.828, + 0.44 + ], + 
"angle": 0, + "content": "Figure 3: Comparison of ASR scores across different MLRMs and their base MLLMs. For abbreviations, va. and ja. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. We use MB to denote MLRM that are developed with MBerry method." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.449, + 0.825, + 0.478 + ], + "angle": 0, + "content": "Additionally, we provide a more intuitive display for ASR in the form of a radar chart, as shown in Figure 3." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.489, + 0.828, + 0.603 + ], + "angle": 0, + "content": "Key Findings. Experimental results reveal alarming safety blind spots. For instance, in I11legal Activity scenario, MLRMs show \\(3.79 \\times\\) higher ASR than base MLLMs on average. Furthermore, while the base MLLM Qwen2.5-VL achieves near-perfect safety (ASR \\(< 3\\%\\)), its MLRM derivative R1-Onevision suffers catastrophic failure (ASR \\(> 50\\%\\)), achieving near \\(25 \\times\\) degradation. This exposes catastrophic alignment erosion in reasoning-enhanced architectures. Overall, safety degradation varies dramatically across scenarios (\\(\\Delta\\)ASR range: \\(8.1\\% - 2500\\%\\)), with I11legal Activity/Pornography being most/least affected scenarios, demanding urgent scenario-specific red teaming and adaptive alignment protocols for MLRMs." + }, + { + "type": "title", + "bbox": [ + 0.198, + 0.619, + 0.424, + 0.635 + ], + "angle": 0, + "content": "Takeaway 3.2: Safety Blind Spots" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.645, + 0.801, + 0.677 + ], + "angle": 0, + "content": "MLRMs introduce critical safety blind spots — scenarios where base MLLMs excel (e.g., Illegal Activity) become catastrophic vulnerabilities post-reasoning augmentation." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.718, + 0.395, + 0.733 + ], + "angle": 0, + "content": "3.3 Emergent Self-Correction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.747, + 0.827, + 0.79 + ], + "angle": 0, + "content": "In this section, we aim to analyze the relationship between thinking safety and answer safety. This helps uncover potential correlations in the safety resilience across different stages of MLRM outputs, providing insights for constructing hierarchical defense strategies." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.801, + 0.828, + 0.873 + ], + "angle": 0, + "content": "Experimental Protocol. We select three MLRMs (i.e., R1-OneVision, MM-EUREKA-Qwen, Mulberry-Qwen), all strictly following \"Think + Answer\" output formats. From the 10 safety-critical scenarios, we randomly sample 100 adversarial queries per scenario, forming a 1000-sample test set. For each query, we compute Think-HR and Answer-HR under jailbreaking and visualize their normalized joint distribution via 2D heatmaps, as exhibited in Figure 4." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.883, + 0.828, + 0.914 + ], + "angle": 0, + "content": "Key Findings. Figure 4 reveals strong symmetric coupling between Think-HR and Answer-HR. 
Specifically," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.096, + 0.357, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.382, + 0.096, + 0.562, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.095, + 0.822, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.259, + 0.357, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.383, + 0.259, + 0.562, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.26, + 0.822, + 0.404 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.415, + 0.827, + 0.457 + ], + "angle": 0, + "content": "Figure 4: The relationship between reasoning safety and answer safety, where the horizontal and vertical axes represent HR scores. The numbers in the color blocks represent the normalized probabilities, with deeper colors indicating higher probabilities. Best viewed in color." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.468, + 0.825, + 0.538 + ], + "angle": 0, + "content": "- Reasoning-Answer Coupling: When reasoning steps are compromised (Think-HR > 3), answer jailbreaking success rate reaches \\(90.9\\%\\), indicating that unsafe cognitive frameworks propagate to outputs. Conversely, \\(93.7\\%\\) of compromised answers (Answer-HR > 3) originate from unsafe reasoning steps, suggesting output vulnerabilities necessitate but are not fully determined by reasoning flaws." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.537, + 0.827, + 0.607 + ], + "angle": 0, + "content": "- Emergent Self-Correction: A subset of MLRMs (e.g., Mulberry-Qwen) exhibit right-upper quadrant clustering in heatmaps, where \\(12.4\\%\\) of unsafe reasoning steps (Think-HR \\(>3\\)) yield safe answers (Answer-HR \\(\\leq 3\\)), exhibiting emergent self-correction capabilities in MLRMs. This nascent capability provides a foothold for layered defenses: hardening reasoning steps while amplifying innate safeguards." + }, + { + "type": "list", + "bbox": [ + 0.172, + 0.468, + 0.827, + 0.607 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.199, + 0.617, + 0.468, + 0.632 + ], + "angle": 0, + "content": "Takeaway 3.3: Emergent Self-Correction" + }, + { + "type": "text", + "bbox": [ + 0.197, + 0.644, + 0.8, + 0.673 + ], + "angle": 0, + "content": "MLRMs exhibit intrinsic self-correction: \\(12.4\\%\\) of unsafe reasoning chains are overridden by safe answers, revealing preserved safeguards that reject harmful cognition." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.702, + 0.322, + 0.717 + ], + "angle": 0, + "content": "4 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.732, + 0.827, + 0.911 + ], + "angle": 0, + "content": "Multi-modal Reasoning in Language Models. CoT reasoning process has proven effective for enhancing multi-modal capabilities in language models [25]. Initial approaches focused on prompt engineering such as constructing zero-shot prompts like \"think step-by-step to understand the given text and image inputs\" directly [17]. In contrast, the cascade paradigm achieved multi-modal reasoning asynchronously (e.g., using one model to describe an image for an LRM to reason) [24]. Reinforcement learning-based models such as LLaVA-Reasoner [9] and Insight-V [12] leveraged CoT datasets and direct preference optimization (DPO) with human feedback to acquire intrinsic reasoning skills. 
The success of DeepSeek-R1 further catalyzed this trend, inspiring derivative architectures such as Easy-R1 [49], R1-OneVision [50], Multimodal-Open-R1 [13], R1-V [51], VLM-R1 [10] and LMM-R1 [52]. Notably, process reward models (PRMs) like MSTaR [241] and VisualPRM [242] represent a paradigm shift from outcome-focused reward models (ORMs). By providing stepwise feedback during reasoning, PRMs enhance self-consistency and enable iterative self-improvement in MLLMs [25]." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.258 + ], + "angle": 0, + "content": "Safety Exploration of LRMs. The rapid advancement of LRMs has prompted growing attention to their safety risks. Recent work systematically evaluates mainstream LRMs across multiple dimensions [28, 35, 32, 33]. For example, [53] reveals significant gaps between open-source R1 models and commercial counterparts like o3-mini, while [54] verifies that DeepSeek-R1 failed to successfully block most harmful prompts, exposing critical vulnerabilities under adversarial conditions. Works by [34, 27, 55, 26, 30] further probe the safety boundaries of the LRM and identify their failure modes. Concurrently, researchers have begun investigating attack and defense paradigms specific to LRMs — [29] demonstrates that LRMs are uniquely vulnerable to fine-tuning attacks and [31] critiques the limitations of RL-based safety alignment (e.g., reward hacking, generalization failures) in mitigating harmful outputs. However, these efforts focus narrowly on unimodal LRMs, leaving the safety implications of MLRMs largely unexplored — a critical gap given the distinct risks introduced by cross-modal interactions inherent to MLRM architectures." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.277, + 0.303, + 0.293 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.307, + 0.828, + 0.475 + ], + "angle": 0, + "content": "The rapid integration of reasoning capabilities into MLLMs has birthed powerful MLRMs with transformative potential. However, our systematic evaluation reveals that this advancement comes at a profound cost to safety. Through large-scale empirical studies comparing MLRMs with their base MLLMs, we uncover three critical insights: (1) A reasoning tax: MLRMs suffer \\(37.44\\%\\) higher jailbreaking rates than base MLLMs due to eroded safety alignment; (2) Scenario-specific blind spots: risks spike \\(25\\times\\) in contexts like Illegal Activity, far exceeding average degradation; (3) Emergent self-correction: Despite tight reasoning- answer safety coupling, MLRMs demonstrate nascent self-correction. That is, \\(16.23\\%\\) of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. To catalyze mitigation, we release OpenSafeMLRM, an open-source toolkit with unified interface for evaluating mainstream MLRMs, datasets, and attacks. These findings mandate urgent efforts to govern how multi-modal AI reasons ensuring capability advances align with ethical imperatives." + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.493, + 0.269, + 0.509 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.516, + 0.827, + 0.738 + ], + "angle": 0, + "content": "[1] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. 
Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, and S. S. Li. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. CoRR, abs/2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.181, + 0.746, + 0.828, + 0.913 + ], + "angle": 0, + "content": "[2] DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. 
Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu" + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.516, + 0.828, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.208, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, and Wangding Zeng. Deepseek-v3 technical report. CoRR, abs/2412.19437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.144, + 0.521, + 0.16 + ], + "angle": 0, + "content": "[3] OpenAI. Learning to reason with LLMs. 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.17, + 0.825, + 0.2 + ], + "angle": 0, + "content": "[4] Edward Y. Chang, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms. CoRR, abs/2502.03373, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.209, + 0.826, + 0.253 + ], + "angle": 0, + "content": "[5] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. 
arXiv preprint arXiv:2401.14196, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.262, + 0.826, + 0.306 + ], + "angle": 0, + "content": "[6] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.315, + 0.826, + 0.358 + ], + "angle": 0, + "content": "[7] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. R1-vl: Learning to reason with multimodal large language models via stepwise group relative policy optimization. 2025. arXiv preprint arXiv:2503.12397." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.368, + 0.826, + 0.397 + ], + "angle": 0, + "content": "[8] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's \"aha moment\" in visual reasoning on a 2b non-sft model. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.407, + 0.826, + 0.451 + ], + "angle": 0, + "content": "[9] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.46, + 0.825, + 0.491 + ], + "angle": 0, + "content": "[10] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. 2025. Accessed: 2025-02-15." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.5, + 0.825, + 0.53 + ], + "angle": 0, + "content": "[11] Liu Yuqi, Peng Bohao, Zhong Zhisheng, Yue Zihao, Lu Fanbin, Yu Bei, and Jia Jiaya. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.539, + 0.825, + 0.582 + ], + "angle": 0, + "content": "[12] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.592, + 0.815, + 0.609 + ], + "angle": 0, + "content": "[13] EvolvingLMMs Lab. Multimodal open r1. GitHub repository, 2025. Accessed: 2025-02-28." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.618, + 0.825, + 0.674 + ], + "angle": 0, + "content": "[14] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfen Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.685, + 0.825, + 0.714 + ], + "angle": 0, + "content": "[15] Zhangquan Chen, Xufang Luo, and Dongsheng Li. Visrl: Intention-driven visual perception via reinforced reasoning. 2025. arXiv preprint arXiv:2503.07523." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.724, + 0.825, + 0.753 + ], + "angle": 0, + "content": "[16] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.763, + 0.825, + 0.82 + ], + "angle": 0, + "content": "[17] Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, and William Yang Wang. Let's think frame by frame with VIP: A video infilling and prediction dataset for evaluating video chain-of-thought. arXiv preprint arXiv:2305.13903, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.83, + 0.825, + 0.859 + ], + "angle": 0, + "content": "[18] Fanxu Meng, Haotong Yang, Yiding Wang, and Muhan Zhang. Chain of images for intuitively reasoning. arXiv preprint arXiv:2311.09241, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.869, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[19] Jingran Xie, Shun Lei, Yue Yu, Yang Xiang, Hui Wang, Xixin Wu, and Zhiyong Wu. Leveraging chain of thought towards empathetic spoken dialogue without corresponding question answering data. arXiv preprint arXiv:2501.10937, 2025." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[20] Xuewen Luo, Fan Ding, Yinsheng Song, Xiaofeng Zhang, and Junnyong Loo. Pkrd-cot: A unified chain-of-thought prompting for multi-modal large language models in autonomous driving. arXiv preprint arXiv:2412.02025, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.141, + 0.825, + 0.185 + ], + "angle": 0, + "content": "[21] Haojie Zheng, Tianyang Xu, Hanchi Sun, Shu Pu, Ruoxi Chen, and Lichao Sun. Thinking before looking: Improving multimodal llm reasoning via mitigating visual hallucination. arXiv preprint arXiv:2411.12591, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.192, + 0.826, + 0.249 + ], + "angle": 0, + "content": "[22] Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, et al. Cantor: Inspiring multimodal chain-of-thought of mllm. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 9096-9105, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.256, + 0.826, + 0.301 + ], + "angle": 0, + "content": "[23] Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.306, + 0.826, + 0.351 + ], + "angle": 0, + "content": "[24] Bozhi Luan, Hao Feng, Hong Chen, Yonghui Wang, Wengang Zhou, and Houqiang Li. Textcot: Zoom in for enhanced multimodal text-rich image understanding. arXiv preprint arXiv:2404.09797, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.357, + 0.826, + 0.401 + ], + "angle": 0, + "content": "[25] Yaoting Wang, Shengqiong Wu, Yuecheng Zhang, William Wang, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.407, + 0.692, + 0.424 + ], + "angle": 0, + "content": "[26] Chatterbox. Gen ai safety model comparison and reasoning results, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.43, + 0.724, + 0.447 + ], + "angle": 0, + "content": "[27] Adversa.ai. Introducing safety aligned deepseek r1 model by enkrypt ai, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.453, + 0.826, + 0.496 + ], + "angle": 0, + "content": "[28] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in chinese contexts. arXiv preprint arXiv:2502.11137, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.503, + 0.826, + 0.546 + ], + "angle": 0, + "content": "[29] Zhiyuan Xu, Joseph Gardiner, and Sana Belguith. 
The dark deep side of deepseek: Fine-tuning attacks against the safety alignment of cot-enabled models. arXiv preprint arXiv:2502.01225, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.553, + 0.826, + 0.597 + ], + "angle": 0, + "content": "[30] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. arXiv preprint arXiv:2502.04040, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.604, + 0.826, + 0.647 + ], + "angle": 0, + "content": "[31] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.654, + 0.826, + 0.698 + ], + "angle": 0, + "content": "[32] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, page arXiv-2502, February 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.704, + 0.826, + 0.762 + ], + "angle": 0, + "content": "[33] Qiguang Chen, Libo Qin, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.769, + 0.826, + 0.813 + ], + "angle": 0, + "content": "[34] Zonghao Ying, Guangyi Zheng, Yongxin Huang, Deyue Zhang, Wenxin Zhang, Quanchen Zou, Aishan Liu, Xianglong Liu, and Dacheng Tao. Towards understanding the safety boundaries of deepseek models: Evaluation and findings. arXiv preprint arXiv:2503.15092, 2025." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.819, + 0.826, + 0.862 + ], + "angle": 0, + "content": "[35] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. arXiv preprint arXiv:2503.00555, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.826, + 0.913 + ], + "angle": 0, + "content": "[36] Mang Ye, Xuankun Rong, Wenke Huang, Bo Du, Nenghai Yu, and Dacheng Tao. A survey of safety on large vision-language models: Attacks, defenses and evaluations. CoRR, abs/2502.14881, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[37] Xin Liu, Yichen Zhu, Jindong Gu, Yunshi Lan, Chao Yang, and Yu Qiao. Mm-safetybench: A benchmark for safety evaluation of multimodal large language models. In ECCV (56), volume 15114 of Lecture Notes in Computer Science, pages 386-403. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.144, + 0.827, + 0.2 + ], + "angle": 0, + "content": "[38] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, Bo Zhang, and Wei Chen. R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.21, + 0.827, + 0.267 + ], + "angle": 0, + "content": "[39] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. 
arXiv preprint arXiv:2412.18319, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.277, + 0.827, + 0.321 + ], + "angle": 0, + "content": "[40] Yichen Gong, Delong Ran, Jinyuan Liu, Conglei Wang, Tianshuo Cong, Anyu Wang, Sisi Duan, and Xiaoyun Wang. Figstep: Jailbreaking large vision-language models via typographic visual prompts. arXiv preprint arXiv:2311.05608, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.33, + 0.825, + 0.373 + ], + "angle": 0, + "content": "[41] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.383, + 0.827, + 0.426 + ], + "angle": 0, + "content": "[42] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.436, + 0.827, + 0.479 + ], + "angle": 0, + "content": "[43] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Jasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.488, + 0.825, + 0.518 + ], + "angle": 0, + "content": "[44] Yu Wang, Xiaofei Zhou, Yichen Wang, Geyuan Zhang, and Tianxing He. Jailbreak large visual language models through multi-modal linkage. arXiv preprint arXiv:2412.00473, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.527, + 0.827, + 0.571 + ], + "angle": 0, + "content": "[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.58, + 0.478, + 0.596 + ], + "angle": 0, + "content": "[46] Qwen Team. Qwen2.5-vl, January 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.606, + 0.827, + 0.675 + ], + "angle": 0, + "content": "[47] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.685, + 0.825, + 0.729 + ], + "angle": 0, + "content": "[48] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-last: Stronger llms supercharge multimodal capabilities in the wild, May 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.739, + 0.827, + 0.768 + ], + "angle": 0, + "content": "[49] Zheng Yaowei, Lu Junting, Wang Shenzhi, Feng Zhangchi, Kuang Dongdong, and Xiong Yuwen. Easyr1: An efficient, scalable, multi-modality rl training framework. 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.777, + 0.827, + 0.82 + ], + "angle": 0, + "content": "[50] Yangyi VAI. R1-onevision: Open-source multimodal large language model with reasoning. Online, 2025. Available at: https://yangyi-vali.notion.site/r1-onevision#198b1e4047f780c78306fb451be7160d." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.83, + 0.825, + 0.859 + ], + "angle": 0, + "content": "[51] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. 2025. Accessed: 2025-02-02." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.827, + 0.912 + ], + "angle": 0, + "content": "[52] Peng Yingzhe, Zhang Gongrui, Zhang Miaosen, You Zhiyuan, Liu Jie, Zhu Qipeng, Yang Kai, Xu Xingzhong, Geng Xin, and Yang Xu. Lmm-r1: Empowering 3b lmm's with strong reasoning abilities through two-stage rule-based rl. 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[53] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. O3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.129, + 0.826, + 0.184 + ], + "angle": 0, + "content": "[54] Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakhaaee, Nathaniel Li, Steven Basart, Bo Li, et al. Harmbench: A standardized evaluation framework for automated red teaming and robust refusal. arXiv preprint arXiv:2402.04249, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.194, + 0.826, + 0.236 + ], + "angle": 0, + "content": "[55] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.236 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_origin.pdf b/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..b73c0675d1ac95e3eae227ad7cccd1a0b7c5841f --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/af01798f-8993-43b5-b109-47e5f260520d_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f959b330a20a0f28a6b7990aa04173e3b78e27905b5dd3fedfc32cf440b692bc +size 2489492 diff --git a/data/2025/2504_08xxx/2504.08813/full.md b/data/2025/2504_08xxx/2504.08813/full.md new file mode 100644 index 0000000000000000000000000000000000000000..4283706797cb4188d496d012efb0a92d688d6fa7 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/full.md @@ -0,0 +1,251 @@ +# SafeMLRM: Demystifying Safety in Multi-modal Large Reasoning Models + +Junfeng Fang $^{1}$ , Yukai Wang $^{1}$ , Ruipeng Wang $^{2}$ , Zijun Yao $^{1}$ , Kun Wang $^{3}$ , An Zhang $^{2}$ , Xiang Wang $^{2*}$ , Tat-Seng Chua $^{1}$ + +$^{1}$ National University of Singapore, $^{2}$ University of Science and Technology of China + $^{3}$ Nanyang Technological University +fangjf1997@gmail.com + +# Abstract + +The rapid advancement of multi-modal large reasoning models (MLRMs) — enhanced versions of multimodal language models (MLLMs) equipped with reasoning capabilities — has revolutionized diverse applications. However, their safety implications remain underexplored. While prior work has exposed critical vulnerabilities in unimodal reasoning models, MLRMs introduce distinct risks from cross-modal reasoning pathways. 
This work presents the first systematic safety analysis of MLRMs through large-scale empirical studies comparing MLRMs with their base MLLMs. Our experiments reveal three critical findings: (1) The Reasoning Tax: Acquiring reasoning capabilities catastrophically degrades inherited safety alignment. MLRMs exhibit $37.44\%$ higher jailbreaking success rates than base MLLMs under adversarial attacks. (2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer $25\times$ higher attack rates — far exceeding the average $3.4\times$ increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. (3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction — $16.9\%$ of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. These findings underscore the urgency of scenario-aware safety auditing and mechanisms to amplify MLRMs' self-correction potential. To catalyze research, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for mainstream models, datasets, and jailbreaking methods. Our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards. + +![](images/5baac71a839d0492466b5b3bea9347d1027212b1c74ece6233e117dac9f1c557.jpg) + +OpenSafeMLRM + +https://github.com/fangjf1/OpenSafeMLRM + +# 1 Introduction + +With the rapid advancement of AI, large reasoning models (LRMs) like the DeepSeek series have gained significant attention [1, 2, 3, 4]. These models excel in performing complex tasks through meticulous reasoning, enabling transformative impacts across various downstream domains [5, 6]. Recently, this influence has expanded to multi-modal applications [7, 8, 9]. 
By embedding chain-of-thought data during the fine-tuning or reinforcement learning phase of base multi-modal large language models (MLLMs) [10, 11, 12], researchers have developed Multi-modal Large Reasoning Models (MLRMs) [13, 14, 15, 16]. Compared to prompt-based multi-modal reasoning (e.g., instructing base model with "Please think step-by-step") [17, 18, 19, 20], MLRMs inherently encode reasoning process, demonstrating greater potential in alignment with human intentions; in contrast to asynchronous multi-modal reasoning paradigm (e.g., using one model to describe an + +image for an LRM to reason) [21, 22, 23, 24], MLRMs are end-to-end models rather than cascaded pipelines, avoiding compounding errors and modality-specific information loss. These advantages position MLRM as a cornerstone for future advancements in AI reasoning [25]. + +However, alongside these reasoning advancements, model safety concerns have become increasingly critical [26, 27, 28], particularly the risks of unsafe content generation [29, 30]. Recent community efforts have evaluated the safety capabilities of mainstream LRMs like DeepSeek, especially their resilience to jailbreaking attacks [31, 32, 33]. These studies reveal urgent findings, demonstrating that exposure of reasoning processes amplifies safety risks, with attack success rates rising dramatically [34, 35]. While these findings motivate deeper investigations into LRM safety, the safety protocol of MLRMs remains underexplored. This gap demands imperative attention: multi-modal integration introduces novel attack vectors like cross-modal adversarial triggers, fundamentally expanding the threat surface beyond traditional text-only vulnerabilities [36, 37]. + +This study conducts the first systematic safety evaluation of advancing MLRMs. To catalyze this, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for various models, datasets, and jailbreaking methods. 
With OpenSafeMLRM, we evaluate mainstream MLRMs such as R1-Onevision [38], MM-Eureka-Qwen [14], Mulberry-Qwen2VL [39], Mulberry-Llava [39] and Mulberry-Llama [39] across 10 canonical unsafe scenarios curated from MM-SafetyBench [37], and further adapt black-box jailbreaking methods traditionally used for MLLMs [36] for probing MLRMs' safety resilience. All evaluations are applied to both target MLRMs and their base MLLMs. This comparative framework allows us to quantify how reasoning capability acquisition impacts the safety protocols. + +Empirical results reveal several novel findings: (1) Reasoning Tax: The pursuit of advanced reasoning capabilities via supervised fine-tuning (SFT)/reinforcement learning (RL) imposes a steep safety cost — MLRMs exhibit $37.44\%$ higher attack success rates than their base MLLMs, with safety alignment catastrophically eroded during capability enhancement. (2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer $25\times$ higher attack rates — far exceeding the average $3.4\times$ increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. (3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction: $16.23\%$ of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. + +While our evaluation framework conducts comprehensive experiments, we acknowledge that selection bias in test samples and evaluation criteria design may inevitably introduce measurement distortions. To mitigate this, we will refine this evaluation framework by incorporating additional datasets, models, attack vectors, and defense baselines [36] to enhance its comprehensiveness and reliability. These findings reveal fundamental gaps in MLRMs' current safety mechanisms and expose the alarming efficacy of adversarial strategies in bypassing their safeguards. 
In summary, our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards. + +# 2 Experimental Setup + +This work aims to systematically investigate the safety of MLRMs and their safety degradation compared to base MLLMs. Here, we formalize the research aim of this paper and provided specific experimental configurations, including datasets, baseline models and evaluation metrics. + +# Research Aim + +Our aim is to explore the safety vulnerabilities in MLRMs. Through comprehensive analysis across various unsafe scenarios, we highlight key risks and compare the safety performance of MLRMs against their base MLLMs, providing valuable insights for enhancing their safety. + +# 2.1 Datasets & Jailbreaking Methods. + +We evaluate our framework on two widely adopted benchmarks for MLLM safety: MM-SafetyBench [37] and SafetyBench [40]. + +![](images/2fca32aaf7e8dcca1bb4035fe1b80f62456b537f4e8f4441fc1dd307ddd4e2bf.jpg) +Figure 1: Comparison of ASR and HR scores across various MLRMs and their base MLLMs under vanilla unsafe text queries. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color. + +![](images/524be737f917bffa0d7a4defad3e62688344660ebd82d49f19e779fc51fd4ef3.jpg) + +![](images/73d5d2df663c13e44315c732c954870c81e665df6eee922f309b5ac748178580.jpg) + +![](images/e20ee32ac3bfeea3c5379d14e5dbc30e5c7bab73b7bd426f988e4f8b24d0ea6d.jpg) + +- MM-SafetyBench comprises 13 safety-critical scenarios (e.g., illegal activities, hate speech) that MLLMs should strictly prohibit. Constructed via the QueryRelated pipeline [37], this dataset first generates malicious queries across scenarios, then uses GPT-4 [41] to extract unsafe keywords and synthesize three image types: (1) Stable Diffusion (SD) [42] images: Visually aligned with unsafe keywords. (2) Text-overlaid images: Unsafe text superimposed on benign visuals. 
(3) Hybrid images: SD-generated visuals combined with adversarial typography. Following [37], we adopt hybrid images (empirically shown to be the most effective jailbreaking method among the three) as the jailbreaking method for evaluation. +- SafetyBench covers 10 prohibited topics curated from the OpenAI and Meta's Llama-2 [43] usage policies. Built via the FigStep pipeline [40], it leverages GPT-4 [41] to rewrite queries into instructional formats (e.g., "Steps to manufacture illegal drugs"), enumerates them as numbered lists, and converts these into typographic images. These images are then fed to target models to complete missing steps — a second jailbreaking paradigm in our evaluation. To ensure cross-benchmark consistency, we retain the 10 overlapping safety topics between two datasets. + +# 2.2 Baseline MLRMs + +We evaluate five prominent open-source MLRMs spanning diverse base architectures and parameter scales: R1-OneVision [38], MM-EUREKA-Qwen [14], Mulberry-Qwen2-VL [39], Mulberry-LLaVA [39] and Mulberry-LlaMA [39]. These models exhibit R1-style reasoning capabilities through SFT and RL with human feedback alignment. Specifically, they generate intermediate reasoning steps before producing final answers, enhancing performance on complex multi-modal tasks but simultaneously amplifies safety risks due to extended reasoning pathways. + +# 2.3 Evaluation Metrics + +We selected two evaluation metrics. The first is Harmfulness Rating (HR), which employs an additional LLM to assess the level of harm in the generated content. Following [44], we adopted a two-step evaluation to calculate HR: Initially, the output undergoes a substring lookup; if the model's response contains phrases like "Sorry, I can not", the HR is immediately set to 0. 
Next, a detailed prompt with specific scoring rules is designed to instruct GPT-4o-mini [45] to evaluate the harmfulness of the model's output according to the rules, using a 0-5 scale where 0 indicates no harmfulness, and 5 indicates extremely harmful content. + +![](images/f2de661e7d249e4c61df89f13c907384e6f0622209aab49011406c1b6d9ff010.jpg) +Figure 2: Comparison of ASR and HR scores across MLRMs and their base MLLMs under jailbreak attacks. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color. + +![](images/3b9bca542edeca217510c4d7e8e53b59bf563b76e85a29e386e4382336688c4d.jpg) + +![](images/1fbee6b1ef5f8c6c74b3dc102f6621c06ea51843f6bf84e089618fe23a75e04a.jpg) + +![](images/dff095cff933feba5a38f3aee9fb251ef563c1d002268138fcfd04d14cdacb37.jpg) + +The second metric is Attack Success Rate (ASR), defined as the number of successful attacks divided by the total number of attempted attacks. Formally, for a particular instruction dataset $D$ , we define $ASR = I(Q_i) / |D|$ , where $Q_i$ represents the set of text and image queries, and $I(\cdot)$ is an indicator function such that $I(\cdot) = 1$ if the response engages with the malicious query, and $I(\cdot) = 0$ otherwise. ASR is a common metric for evaluating model safety and robustness. In our experiments, we addressed a potential issue: directly using the LLM as a judge to determine if the model output has been "jailbroken" introduces considerable uncertainty. Therefore, based on the detailed guidelines in the aforementioned prompt, we define a jailbreak as successful when the HR score equals 4 or 5. + +Evaluation Dimensions: As there has been no comprehensive evaluation specifically targeting MLRMs, we define two key dimensions for assessment: Result Safety and Overall Safety. These are equally important, as different LRM models may present outcomes in varying formats — either as raw results or as reasoning alongside the results. 
In the following sections, we will exhibit and analyze the metrics for these two dimensions. Note that while we also report metrics related to Reasoning Safety for investigating the relationship between the safety of the reasoning process and the final output, we consider this secondary to the two primary dimensions. + +# 3 Safety Inspection of MLRMs + +# 3.1 Reasoning Tax + +Prior work has observed the "safety tax" in reasoning models, i.e., performance deterioration caused by safety alignment [35]. We identify a critical counterpart: the reasoning tax, defined as the safety degradation incurred when augmenting MLLMs with reasoning capabilities through SFT or RL. To quantify this phenomenon, we systematically compare safety metrics between four safety-aligned base MLLMs and five MLRMs derived from them. + +Experimental Protocol. To isolate the impact of reasoning capability acquisition, we adopt a two-stage evaluation: For base MLLMs, we directly assess the outputs for HR and ASR, as these models lack explicit reasoning step generation. For MLRMs, we collect HR and ASR across Think, Answer and Overall processes. Note that although we employed a standard prompt template to enforce the separation of the reasoning and answer processes, some MLRMs (e.g., Mulberry-LlaMA) still struggled to strictly follow the "Think + Answer" format in their outputs. For these models, we report their overall safety solely. Figure 1 and 2 exhibit the results under vanilla unsafe text queries and jailbreak attacks, respectively. + +Table 1: Comparison of HR scores on MM-SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the "Model" column represents the base MLLMs. + +
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL[46]-Vani.Overall0.040.280.841.210.301.850.682.674.043.27
Jail.Overall2.482.734.253.833.404.113.683.154.224.18
R1-OV [38]Vani.Think3.102.972.633.493.403.673.103.453.624.23
Answer3.553.293.234.193.924.513.204.104.364.32
Overall3.033.072.703.993.484.403.183.724.354.28
Jail.Think4.123.624.274.354.143.904.123.203.844.20
Answer4.363.864.664.594.514.154.543.714.034.25
Overall4.313.914.614.604.374.324.333.504.224.21
Eureka-QW [14]Vani.Think1.951.973.203.452.704.002.903.333.933.54
Answer2.031.873.343.112.294.152.553.354.043.62
Overall2.232.013.363.532.654.222.973.233.993.90
Jail.Think3.403.124.004.243.903.913.793.194.013.74
Answer2.893.254.304.033.663.823.673.083.913.71
Overall3.393.454.254.243.974.054.013.254.113.91
QW2-VL [47]-Vani.Overall0.050.050.550.790.141.750.361.853.822.25
Jail.Overall3.663.294.344.224.274.394.063.104.053.88
MBerry-QW [39]Vani.Think2.122.342.573.063.122.962.632.573.282.83
Answer2.452.473.323.442.623.953.063.353.883.61
Overall2.502.443.393.552.754.103.033.654.083.82
Jail.Think3.272.873.023.583.452.973.472.382.492.91
Answer4.414.074.484.474.444.084.323.393.863.86
Overall4.484.194.364.574.604.334.453.554.083.99
LMA3-LVA [48]-Vani.Overall0.150.501.451.920.582.450.912.074.072.64
Jail.Overall3.773.154.054.224.053.763.963.323.803.85
MBerry-LVA [39]Vani.Overall1.953.364.413.393.743.802.603.323.993.55
Jail.Overall3.853.523.554.033.773.783.893.253.443.79
LMA-3.2-Vision [43]-Vani.Overall0.100.732.071.760.624.431.093.734.423.63
Jail.Overall0.761.662.181.961.883.592.373.044.173.84
MBerry-LMA [39]Vani.Overall0.100.641.111.310.623.000.763.834.294.23
Jail.Overall3.383.182.983.583.113.653.423.513.564.07
+ +Key Findings. Figures 1 and 2 reveal systematic safety degradation across all MLRMs. Specifically, + +- Overall Safety Collapse: MLRMs exhibit $31.30\%$ higher ASR ( $59.52\%$ vs. base MLLMs' $28.22\%$ ) and 1.64 higher HR ( $3.07\%$ vs. 1.43), demonstrating that exposing reasoning chains fundamentally expands attack surfaces. +- Persistent Answer Degradation: Even when ignoring unsafe reasoning steps, MLRM answers show $25.89\%$ higher ASR and 1.30 higher HR compared to base models. This proves safety erosion persists beyond reasoning exposure, suggesting SFT/RL alignment damages intrinsic safeguards. +- Architectural Vulnerability: Qwen2.5-based MLRMs suffer catastrophic safety loss (ASR + 40.06%, HR + 2.02), suggesting architectural incompatibility between reasoning and safety mechanisms. Notably, MBerry-LMA exhibits an inverse trend: its safety metrics improve post-reasoning augmentation (ASR - 4.8%, HR - 0.21). We believe this anomaly hints at potential safeguards, and may provide a rare blueprint for designing safety-resilient reasoning models. +- Reasoning as Attack Vector: Unsafe reasoning occurs $12.52\%$ more frequently than unsafe answers, highlighting systemic risks in exposing intermediate reasoning to users. + +Table 2: Comparison of HR scores on SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the "Model" column represents the base MLLMs. + +
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL [46]-Vani.Overall0.220.420.000.180.501.960.743.943.923.70
Jail.Overall2.782.684.363.044.084.043.143.564.323.62
R1-OV [38]Vani.Think3.302.574.122.943.484.173.103.404.003.22
Answer4.003.193.983.433.944.593.393.763.944.18
Overall3.442.663.843.143.664.302.983.224.063.64
Jail.Think4.123.794.864.584.444.204.523.403.804.34
Answer4.673.884.914.604.514.384.583.673.874.02
Overall4.183.784.884.624.544.484.523.583.883.98
Eureka-QW [14]Vani.Think2.891.613.262.142.184.042.283.303.903.54
Answer2.501.583.021.941.864.142.123.143.643.50
Overall2.501.503.041.901.844.002.303.303.543.28
Jail.Think3.142.843.903.343.464.183.503.383.783.62
Answer3.262.984.123.603.604.143.663.263.723.36
Overall3.322.83.983.523.584.183.543.523.863.60
QW2-VL [47]-Vani.Overall0.280.340.280.060.062.240.763.083.923.68
Jail.Overall3.302.744.823.344.404.483.824.044.284.40
MBerry-QW [39]Vani.Think2.722.423.142.392.623.372.622.713.243.33
Answer2.962.473.452.682.964.102.843.803.653.92
Overall2.882.303.422.463.064.162.683.723.663.86
Jail.Think4.153.954.484.104.314.364.163.733.893.82
Answer4.464.084.804.384.564.664.403.564.004.20
Overall4.694.244.884.424.644.524.463.464.004.18
LMA3-LVA [48]-Vani.Overall0.660.842.260.861.082.601.203.323.863.58
Jail.Overall4.223.064.484.364.243.764.043.323.803.85
MBerry-LVA [39]Vani.Overall1.901.463.681.523.283.482.223.624.043.64
Jail.Overall4.383.564.424.083.944.043.843.383.383.58
LMA-3.2-Vision [43]-Vani.Overall1.781.361.200.721.684.301.763.904.404.00
Jail.Overall2.923.304.263.863.644.223.583.523.983.96
MBerry-LMA [39]Vani.Overall1.180.721.700.881.003.181.123.844.243.64
Jail.Overall3.964.044.744.524.484.463.943.903.984.20
+ +# Takeaway 3.1: Reasoning Tax in MLRMs + +SFT/RL-based reasoning acquisition severely compromises safety alignment of base MLLMs, a phenomenon we term the "reasoning tax" in MLRMs. + +# 3.2 Safety Blind Spots + +We conduct fine-grained analysis to uncover safety blind spots—scenarios where MLRMs exhibit catastrophic safety failures despite base MLLMs achieving near-perfect alignment. We ask: (1) Do MLRMs inherit base models' scenario-specific safety profiles? (2) Does the reasoning tax manifest heterogeneously across scenarios? + +Experimental Protocol. Following [37], we evaluate 10 safety-critical scenarios: Illegal Activity, Hate Speech, Malware Generation, Physical Harm, Fraud, Pornography, Privacy Violence, Legal Opinion, Financial Advice, and Consultation. Comparison of HR scores across various scenarios on MM-SafetyBench and SafetyBench datasets are exhibited in Table 1 and 2, respectively. + +![](images/60217ca26d6c4706bc4f998ce4df4e04632607ededf903ac42f70a86fd91f47d.jpg) + +![](images/53e2c0898fa6c870825288302fce6eecd6766b8ee84334a014b1d11362a0eb5f.jpg) + +![](images/57a25f7f7f4c6c142c2394afd0944b8dcc8625bffd742589d64785330fa33d91.jpg) + +![](images/96d0037c6b1f0c919e8f64e91c808aa868ab3b2d406f3824f52753c349cc06d0.jpg) + +![](images/574b87aa30ef2437f88de2908076b3e19b49865518778ec5150bd86129cf9bef.jpg) + +![](images/bc774ec0c51aac450e2ceec06bbe9b1f0ea58d88ce42c26e8fd04ed1ac78bfe8.jpg) + +![](images/04693c91aafe664ff917e72897465625489f7fd357aabc415b90861cb283f963.jpg) + +![](images/4cdb68a77c41887d4e526d090e30fd1a2ff06614cb2e14b5073428ed1d868612.jpg) + +![](images/6356fd790de32a30a73b6fbc40b822e226a13a741a8e1a8e4c327465cad11262.jpg) + +![](images/55de58692bb2a404a8a856a334e4abff7819345d1afe93f4fd164a052c994d15.jpg) + +![](images/149faa88859aa02e53597cc89337335a33b638b96b121b487ef6244ed69951a5.jpg) + +![](images/65d048820b8a11abb71c8e4904d33e7f6a604b77a10e35895962f2dc0c344fad.jpg) + 
+![](images/7a14c4ceaa41887d4a5045cb9fb35a9973efd78da0869bd0346884.jpg)
+Figure 3: Comparison of ASR scores across different MLRMs and their base MLLMs. For abbreviations, va. and ja. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. We use MB to denote MLRM that are developed with MBerry method.
+
+![](images/06be97e39e60553cc8f956de76996a567c71d1a7d7613291a13fc8c3397cbf66.jpg)
+
+![](images/6c4e282129807050626985bcb6911ef73c9400789bc0437af5.jpg)
+
+![](images/91406f4d686d264c572bc7d1bb07d6ec7d28811e9c5b704367ea8ea288d1c7f4.jpg)
+
+Additionally, we provide a more intuitive display for ASR in the form of a radar chart, as shown in Figure 3.
+
+Key Findings. Experimental results reveal alarming safety blind spots. For instance, in Illegal Activity scenario, MLRMs show $3.79 \times$ higher ASR than base MLLMs on average. Furthermore, while the base MLLM Qwen2.5-VL achieves near-perfect safety (ASR $< 3\%$ ), its MLRM derivative R1-Onevision suffers catastrophic failure (ASR $> 50\%$ ), achieving near $25 \times$ degradation. This exposes catastrophic alignment erosion in reasoning-enhanced architectures. Overall, safety degradation varies dramatically across scenarios ( $\Delta$ ASR range: $8.1\% - 2500\%$ ), with Illegal Activity/Pornography being most/least affected scenarios, demanding urgent scenario-specific red teaming and adaptive alignment protocols for MLRMs.
+
+# Takeaway 3.2: Safety Blind Spots
+
+MLRMs introduce critical safety blind spots — scenarios where base MLLMs excel (e.g., Illegal Activity) become catastrophic vulnerabilities post-reasoning augmentation.
+
+# 3.3 Emergent Self-Correction
+
+In this section, we aim to analyze the relationship between thinking safety and answer safety. This helps uncover potential correlations in the safety resilience across different stages of MLRM outputs, providing insights for constructing hierarchical defense strategies. 
+ +Experimental Protocol. We select three MLRMs (i.e., R1-OneVision, MM-EUREKA-Qwen, Mulberry-Qwen), all strictly following "Think + Answer" output formats. From the 10 safety-critical scenarios, we randomly sample 100 adversarial queries per scenario, forming a 1000-sample test set. For each query, we compute Think-HR and Answer-HR under jailbreaking and visualize their normalized joint distribution via 2D heatmaps, as exhibited in Figure 4. + +Key Findings. Figure 4 reveals strong symmetric coupling between Think-HR and Answer-HR. Specifically, + +![](images/0f6b250de89a5323a9368b9e1db8598f144284be977a86440193d4c19b46cf1a.jpg) + +![](images/b96251a71647018544dd68520a8308c9c7cfc9cc7569ff38fe7cb1d03bdbe385.jpg) + +![](images/98b5b441b4b4a884c9266ad7fc01d2d0e1969952264386748b5b2cf63ca2e5a6.jpg) + +![](images/ffdc7b83ae60bf79a28deb1a926f46e94e6cc41907599645a0ec1bc82fd49706.jpg) +Figure 4: The relationship between reasoning safety and answer safety, where the horizontal and vertical axes represent HR scores. The numbers in the color blocks represent the normalized probabilities, with deeper colors indicating higher probabilities. Best viewed in color. + +![](images/feec6998f962b31d78f94fa1bbbcb11311f15d2b33513d67b19b41c78845006e.jpg) + +![](images/4f65d845f53f43f7c10558a1e1c58dbf484417bc971c452cf7673bb398f6dcc5.jpg) + +- Reasoning-Answer Coupling: When reasoning steps are compromised (Think-HR > 3), answer jailbreaking success rate reaches $90.9\%$ , indicating that unsafe cognitive frameworks propagate to outputs. Conversely, $93.7\%$ of compromised answers (Answer-HR > 3) originate from unsafe reasoning steps, suggesting output vulnerabilities necessitate but are not fully determined by reasoning flaws. 
+- Emergent Self-Correction: A subset of MLRMs (e.g., Mulberry-Qwen) exhibit right-upper quadrant clustering in heatmaps, where $12.4\%$ of unsafe reasoning steps (Think-HR $>3$ ) yield safe answers (Answer-HR $\leq 3$ ), exhibiting emergent self-correction capabilities in MLRMs. This nascent capability provides a foothold for layered defenses: hardening reasoning steps while amplifying innate safeguards. + +# Takeaway 3.3: Emergent Self-Correction + +MLRMs exhibit intrinsic self-correction: $12.4\%$ of unsafe reasoning chains are overridden by safe answers, revealing preserved safeguards that reject harmful cognition. + +# 4 Related Work + +Multi-modal Reasoning in Language Models. CoT reasoning process has proven effective for enhancing multi-modal capabilities in language models [25]. Initial approaches focused on prompt engineering such as constructing zero-shot prompts like "think step-by-step to understand the given text and image inputs" directly [17]. In contrast, the cascade paradigm achieved multi-modal reasoning asynchronously (e.g., using one model to describe an image for an LRM to reason) [24]. Reinforcement learning-based models such as LLaVA-Reasoner [9] and Insight-V [12] leveraged CoT datasets and direct preference optimization (DPO) with human feedback to acquire intrinsic reasoning skills. The success of DeepSeek-R1 further catalyzed this trend, inspiring derivative architectures such as Easy-R1 [49], R1-OneVision [50], Multimodal-Open-R1 [13], R1-V [51], VLM-R1 [10] and LMM-R1 [52]. Notably, process reward models (PRMs) like MSTaR [241] and VisualPRM [242] represent a paradigm shift from outcome-focused reward models (ORMs). By providing stepwise feedback during reasoning, PRMs enhance self-consistency and enable iterative self-improvement in MLLMs [25]. + +Safety Exploration of LRMs. The rapid advancement of LRMs has prompted growing attention to their safety risks. 
Recent work systematically evaluates mainstream LRMs across multiple dimensions [28, 35, 32, 33]. For example, [53] reveals significant gaps between open-source R1 models and commercial counterparts like o3-mini, while [54] verifies that DeepSeek-R1 failed to successfully block most harmful prompts, exposing critical vulnerabilities under adversarial conditions. Works by [34, 27, 55, 26, 30] further probe the safety boundaries of the LRM and identify their failure modes. Concurrently, researchers have begun investigating attack and defense paradigms specific to LRMs — [29] demonstrates that LRMs are uniquely vulnerable to fine-tuning attacks and [31] critiques the limitations of RL-based safety alignment (e.g., reward hacking, generalization failures) in mitigating harmful outputs. However, these efforts focus narrowly on unimodal LRMs, leaving the safety implications of MLRMs largely unexplored — a critical gap given the distinct risks introduced by cross-modal interactions inherent to MLRM architectures. + +# 5 Conclusion + +The rapid integration of reasoning capabilities into MLLMs has birthed powerful MLRMs with transformative potential. However, our systematic evaluation reveals that this advancement comes at a profound cost to safety. Through large-scale empirical studies comparing MLRMs with their base MLLMs, we uncover three critical insights: (1) A reasoning tax: MLRMs suffer $37.44\%$ higher jailbreaking rates than base MLLMs due to eroded safety alignment; (2) Scenario-specific blind spots: risks spike $25\times$ in contexts like Illegal Activity, far exceeding average degradation; (3) Emergent self-correction: Despite tight reasoning- answer safety coupling, MLRMs demonstrate nascent self-correction. That is, $16.23\%$ of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. 
To catalyze mitigation, we release OpenSafeMLRM, an open-source toolkit with unified interface for evaluating mainstream MLRMs, datasets, and attacks. These findings mandate urgent efforts to govern how multi-modal AI reasons ensuring capability advances align with ethical imperatives. + +# References + +[1] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, and S. S. Li. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. CoRR, abs/2501.12948, 2025. +[2] DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. 
Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu + +Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, and Wangding Zeng. Deepseek-v3 technical report. CoRR, abs/2412.19437, 2024. +[3] OpenAI. Learning to reason with LLMs. 2024. +[4] Edward Y. Chang, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms. CoRR, abs/2502.03373, 2025. +[5] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024. +[6] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. +[7] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. R1-vl: Learning to reason with multimodal large language models via stepwise group relative policy optimization. 2025. arXiv preprint arXiv:2503.12397. 
+[8] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's "aha moment" in visual reasoning on a 2b non-sft model. 2025. +[9] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024. +[10] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. 2025. Accessed: 2025-02-15. +[11] Liu Yuqi, Peng Bohao, Zhong Zhisheng, Yue Zihao, Lu Fanbin, Yu Bei, and Jia Jiaya. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. 2025. +[12] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024. +[13] EvolvingLMMs Lab. Multimodal open r1. GitHub repository, 2025. Accessed: 2025-02-28. +[14] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfen Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. 2025. +[15] Zhangquan Chen, Xufang Luo, and Dongsheng Li. Visrl: Intention-driven visual perception via reinforced reasoning. 2025. arXiv preprint arXiv:2503.07523. +[16] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025. +[17] Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, and William Yang Wang. Let's think frame by frame with VIP: A video infilling and prediction dataset for evaluating video chain-of-thought. arXiv preprint arXiv:2305.13903, 2023. 
+[18] Fanxu Meng, Haotong Yang, Yiding Wang, and Muhan Zhang. Chain of images for intuitively reasoning. arXiv preprint arXiv:2311.09241, 2023. +[19] Jingran Xie, Shun Lei, Yue Yu, Yang Xiang, Hui Wang, Xixin Wu, and Zhiyong Wu. Leveraging chain of thought towards empathetic spoken dialogue without corresponding question answering data. arXiv preprint arXiv:2501.10937, 2025. + +[20] Xuewen Luo, Fan Ding, Yinsheng Song, Xiaofeng Zhang, and Junnyong Loo. Pkrd-cot: A unified chain-of-thought prompting for multi-modal large language models in autonomous driving. arXiv preprint arXiv:2412.02025, 2024. +[21] Haojie Zheng, Tianyang Xu, Hanchi Sun, Shu Pu, Ruoxi Chen, and Lichao Sun. Thinking before looking: Improving multimodal llm reasoning via mitigating visual hallucination. arXiv preprint arXiv:2411.12591, 2024. +[22] Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, et al. Cantor: Inspiring multimodal chain-of-thought of mllm. In Proceedings of the 32nd ACM International Conference on Multimedia, pages 9096-9105, 2024. +[23] Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024. +[24] Bozhi Luan, Hao Feng, Hong Chen, Yonghui Wang, Wengang Zhou, and Houqiang Li. Textcot: Zoom in for enhanced multimodal text-rich image understanding. arXiv preprint arXiv:2404.09797, 2024. +[25] Yaoting Wang, Shengqiong Wu, Yuecheng Zhang, William Wang, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025. +[26] Chatterbox. Gen ai safety model comparison and reasoning results, 2025. +[27] Adversa.ai. Introducing safety aligned deepseek r1 model by enkrypt ai, 2025. 
+[28] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in chinese contexts. arXiv preprint arXiv:2502.11137, 2025. +[29] Zhiyuan Xu, Joseph Gardiner, and Sana Belguith. The dark deep side of deepseek: Fine-tuning attacks against the safety alignment of cot-enabled models. arXiv preprint arXiv:2502.01225, 2025. +[30] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. arXiv preprint arXiv:2502.04040, 2025. +[31] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025. +[32] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, page arXiv-2502, February 2025. +[33] Qiguang Chen, Libo Qin, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024. +[34] Zonghao Ying, Guangyi Zheng, Yongxin Huang, Deyue Zhang, Wenxin Zhang, Quanchen Zou, Aishan Liu, Xianglong Liu, and Dacheng Tao. Towards understanding the safety boundaries of deepseek models: Evaluation and findings. arXiv preprint arXiv:2503.15092, 2025. +[35] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. arXiv preprint arXiv:2503.00555, 2025. +[36] Mang Ye, Xuankun Rong, Wenke Huang, Bo Du, Nenghai Yu, and Dacheng Tao. 
A survey of safety on large vision-language models: Attacks, defenses and evaluations. CoRR, abs/2502.14881, 2025. + +[37] Xin Liu, Yichen Zhu, Jindong Gu, Yunshi Lan, Chao Yang, and Yu Qiao. Mm-safetybench: A benchmark for safety evaluation of multimodal large language models. In ECCV (56), volume 15114 of Lecture Notes in Computer Science, pages 386-403. Springer, 2024. +[38] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, Bo Zhang, and Wei Chen. R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025. +[39] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024. +[40] Yichen Gong, Delong Ran, Jinyuan Liu, Conglei Wang, Tianshuo Cong, Anyu Wang, Sisi Duan, and Xiaoyun Wang. Figstep: Jailbreaking large vision-language models via typographic visual prompts. arXiv preprint arXiv:2311.05608, 2023. +[41] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023. +[42] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022. +[43] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Jasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. 
+[44] Yu Wang, Xiaofei Zhou, Yichen Wang, Geyuan Zhang, and Tianxing He. Jailbreak large visual language models through multi-modal linkage. arXiv preprint arXiv:2412.00473, 2024. +[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. +[46] Qwen Team. Qwen2.5-vl, January 2025. +[47] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. +[48] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-last: Stronger llms supercharge multimodal capabilities in the wild, May 2024. +[49] Zheng Yaowei, Lu Junting, Wang Shenzhi, Feng Zhangchi, Kuang Dongdong, and Xiong Yuwen. Easyr1: An efficient, scalable, multi-modality rl training framework. 2025. +[50] Yangyi VAI. R1-onevision: Open-source multimodal large language model with reasoning. Online, 2025. Available at: https://yangyi-vali.notion.site/r1-onevision#198b1e4047f780c78306fb451be7160d. +[51] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. 2025. Accessed: 2025-02-02. +[52] Peng Yingzhe, Zhang Gongrui, Zhang Miaosen, You Zhiyuan, Liu Jie, Zhu Qipeng, Yang Kai, Xu Xingzhong, Geng Xin, and Yang Xu. Lmm-r1: Empowering 3b lmm's with strong reasoning abilities through two-stage rule-based rl. 2025. + +[53] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. O3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025. 
+[54] Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakhaaee, Nathaniel Li, Steven Basart, Bo Li, et al. Harmbench: A standardized evaluation framework for automated red teaming and robust refusal. arXiv preprint arXiv:2402.04249, 2024. +[55] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. arXiv preprint arXiv:2502.12659, 2025. \ No newline at end of file diff --git a/data/2025/2504_08xxx/2504.08813/images/04693c91aafe664ff917e72897465625489f7fd357aabc415b90861cb283f963.jpg b/data/2025/2504_08xxx/2504.08813/images/04693c91aafe664ff917e72897465625489f7fd357aabc415b90861cb283f963.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9bb29520a81e84fbd30a232b07a07a04a9fe3389 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/04693c91aafe664ff917e72897465625489f7fd357aabc415b90861cb283f963.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:881e9b492ccd966fd21ae93665a63a0f3e1981d29be6ba6cdab0e12bff9261ca +size 3918 diff --git a/data/2025/2504_08xxx/2504.08813/images/06be97e39e60553cc8f956de76996a567c71d1a7d7613291a13fc8c3397cbf66.jpg b/data/2025/2504_08xxx/2504.08813/images/06be97e39e60553cc8f956de76996a567c71d1a7d7613291a13fc8c3397cbf66.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7515ac2cbb87e980fe4ed853d7d5eb5feb66959a --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/06be97e39e60553cc8f956de76996a567c71d1a7d7613291a13fc8c3397cbf66.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6f465795f4c0e5f20eb1c7a93b2204aab0b2711ddd71f10aa49c5a1090df1304 +size 4098 diff --git a/data/2025/2504_08xxx/2504.08813/images/0f6b250de89a5323a9368b9e1db8598f144284be977a86440193d4c19b46cf1a.jpg b/data/2025/2504_08xxx/2504.08813/images/0f6b250de89a5323a9368b9e1db8598f144284be977a86440193d4c19b46cf1a.jpg 
new file mode 100644 index 0000000000000000000000000000000000000000..cbdc3e72634d680c2c821e5cd55c7cf47108e2b0 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/0f6b250de89a5323a9368b9e1db8598f144284be977a86440193d4c19b46cf1a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f48c034215879a9ac3a2101595e3503026493f5847d47f5783e8832ef206fd61 +size 14286 diff --git a/data/2025/2504_08xxx/2504.08813/images/149faa88859aa02e53597cc89337335a33b638b96b121b487ef6244ed69951a5.jpg b/data/2025/2504_08xxx/2504.08813/images/149faa88859aa02e53597cc89337335a33b638b96b121b487ef6244ed69951a5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33602d587e677b0eabe9447e5f6abd348f38cfbd --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/149faa88859aa02e53597cc89337335a33b638b96b121b487ef6244ed69951a5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:caffdefc805a25427466dd78051ff6346e69353235b1491417d1d0b3cbd66689 +size 11907 diff --git a/data/2025/2504_08xxx/2504.08813/images/1fbee6b1ef5f8c6c74b3dc102f6621c06ea51843f6bf84e089618fe23a75e04a.jpg b/data/2025/2504_08xxx/2504.08813/images/1fbee6b1ef5f8c6c74b3dc102f6621c06ea51843f6bf84e089618fe23a75e04a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ae18e9d6b0f0709d35edb101ee02af686886058 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/1fbee6b1ef5f8c6c74b3dc102f6621c06ea51843f6bf84e089618fe23a75e04a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6ebe203ca7c26acdf0a9d795a544fbb0652e45ef9abf3789ce93f314ef078365 +size 14055 diff --git a/data/2025/2504_08xxx/2504.08813/images/2fca32aaf7e8dcca1bb4035fe1b80f62456b537f4e8f4441fc1dd307ddd4e2bf.jpg b/data/2025/2504_08xxx/2504.08813/images/2fca32aaf7e8dcca1bb4035fe1b80f62456b537f4e8f4441fc1dd307ddd4e2bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..786911c3e1e5eb9beceec799721291398c5c0b71 --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08813/images/2fca32aaf7e8dcca1bb4035fe1b80f62456b537f4e8f4441fc1dd307ddd4e2bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:733712e136e794531aa812ba042f1cca5397b403696a3fd85c94a0a39d2926f4 +size 43604 diff --git a/data/2025/2504_08xxx/2504.08813/images/327d55b58bbda774ffb1eec6ecd34158a8773e8c4f874654e783879ab6338cd3.jpg b/data/2025/2504_08xxx/2504.08813/images/327d55b58bbda774ffb1eec6ecd34158a8773e8c4f874654e783879ab6338cd3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..199582325a744aaabd5e0b1ec045bce9001c0522 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/327d55b58bbda774ffb1eec6ecd34158a8773e8c4f874654e783879ab6338cd3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c8c92985c845ff22dac90b8febaa2401c92bac0207ecc7eab711f925782f818 +size 275853 diff --git a/data/2025/2504_08xxx/2504.08813/images/3b9bca542edeca217510c4d7e8e53b59bf563b76e85a29e386e4382336688c4d.jpg b/data/2025/2504_08xxx/2504.08813/images/3b9bca542edeca217510c4d7e8e53b59bf563b76e85a29e386e4382336688c4d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..898ba8e04816c42e5daf12a513965062847cbe26 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/3b9bca542edeca217510c4d7e8e53b59bf563b76e85a29e386e4382336688c4d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f3a7c37a5c280324e96124b59ff058cff01fef88ee0524225eb20ef0661cc6c +size 23512 diff --git a/data/2025/2504_08xxx/2504.08813/images/4cdb68a77c41887d4e526d090e30fd1a2ff06614cb2e14b5073428ed1d868612.jpg b/data/2025/2504_08xxx/2504.08813/images/4cdb68a77c41887d4e526d090e30fd1a2ff06614cb2e14b5073428ed1d868612.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6ae08c152f9ce4c73a26b2a9481ef21aab594a8a --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/4cdb68a77c41887d4e526d090e30fd1a2ff06614cb2e14b5073428ed1d868612.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:19449a3596e26d06de2332fb0f0e0ccd4f8e54d9f5191871551eb74d3bcf3729 +size 6553 diff --git a/data/2025/2504_08xxx/2504.08813/images/4f65d845f53f43f7c10558a1e1c58dbf484417bc971c452cf7673bb398f6dcc5.jpg b/data/2025/2504_08xxx/2504.08813/images/4f65d845f53f43f7c10558a1e1c58dbf484417bc971c452cf7673bb398f6dcc5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7c523c16bc910e95f0e2451fec7de1f657a304e3 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/4f65d845f53f43f7c10558a1e1c58dbf484417bc971c452cf7673bb398f6dcc5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f0e60976ab0ad2fcc5009082d781e903f4ffd91e49bd6fb5018cba9d28bde57 +size 16491 diff --git a/data/2025/2504_08xxx/2504.08813/images/524be737f917bffa0d7a4defad3e62688344660ebd82d49f19e779fc51fd4ef3.jpg b/data/2025/2504_08xxx/2504.08813/images/524be737f917bffa0d7a4defad3e62688344660ebd82d49f19e779fc51fd4ef3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8fba622d697b377d48be3cd6fbefeeb4b6ae3377 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/524be737f917bffa0d7a4defad3e62688344660ebd82d49f19e779fc51fd4ef3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffcbe5882c08c4fef7a3f32f43019ff3c3d6e392e4094c0be35fc06762099820 +size 24009 diff --git a/data/2025/2504_08xxx/2504.08813/images/53e2c0898fa6c870825288302fce6eecd6766b8ee84334a014b1d11362a0eb5f.jpg b/data/2025/2504_08xxx/2504.08813/images/53e2c0898fa6c870825288302fce6eecd6766b8ee84334a014b1d11362a0eb5f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..617aef1db03f06a23a48d960c0f3b28011451e45 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/53e2c0898fa6c870825288302fce6eecd6766b8ee84334a014b1d11362a0eb5f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:30d3f1da08683f4cf97d5b1fe036bd08baf68b7673b18851cdfee85d35c5a8c6 +size 12235 diff --git 
a/data/2025/2504_08xxx/2504.08813/images/55de58692bb2a404a8a856a334e4abff7819345d1afe93f4fd164a052c994d15.jpg b/data/2025/2504_08xxx/2504.08813/images/55de58692bb2a404a8a856a334e4abff7819345d1afe93f4fd164a052c994d15.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64b52c6d26095a353c2d970a60c0849a16438776 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/55de58692bb2a404a8a856a334e4abff7819345d1afe93f4fd164a052c994d15.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a73b11d45e66206f2828c71e9687615d66a2fd82fbc797f2725b2d812f8de228 +size 11264 diff --git a/data/2025/2504_08xxx/2504.08813/images/574b87aa30ef2437f88de2908076b3e19b49865518778ec5150bd86129cf9bef.jpg b/data/2025/2504_08xxx/2504.08813/images/574b87aa30ef2437f88de2908076b3e19b49865518778ec5150bd86129cf9bef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25eeb3dab2611f2ba8e6dcac835eab9e755b4999 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/574b87aa30ef2437f88de2908076b3e19b49865518778ec5150bd86129cf9bef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:659ab7318c24ca4085cc0dd29330dc2efb4308207abc39ff6817d35e9e3986c4 +size 4287 diff --git a/data/2025/2504_08xxx/2504.08813/images/57a25f7f7f4c6c142c2394afd0944b8dcc8625bffd742589d64785330fa33d91.jpg b/data/2025/2504_08xxx/2504.08813/images/57a25f7f7f4c6c142c2394afd0944b8dcc8625bffd742589d64785330fa33d91.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0036410e8873cd677b631f588c4edba1aef3e941 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/57a25f7f7f4c6c142c2394afd0944b8dcc8625bffd742589d64785330fa33d91.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:180bc786d32a668a7d5469a7f6e1c63fce80abee5c779c12d427b3023fd8e5a6 +size 12885 diff --git a/data/2025/2504_08xxx/2504.08813/images/5baac71a839d0492466b5b3bea9347d1027212b1c74ece6233e117dac9f1c557.jpg 
b/data/2025/2504_08xxx/2504.08813/images/5baac71a839d0492466b5b3bea9347d1027212b1c74ece6233e117dac9f1c557.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a86c338bc2c794888bd0e7c2f4b7c65ec8d61c53 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/5baac71a839d0492466b5b3bea9347d1027212b1c74ece6233e117dac9f1c557.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b2d1216f178de0a4348d937d3b837c14cdbc1a58761406fe119dd7353bc1d615 +size 1103 diff --git a/data/2025/2504_08xxx/2504.08813/images/60217ca26d6c4706bc4f998ce4df4e04632607ededf903ac42f70a86fd91f47d.jpg b/data/2025/2504_08xxx/2504.08813/images/60217ca26d6c4706bc4f998ce4df4e04632607ededf903ac42f70a86fd91f47d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..01a035adbcbb28b896d1c561aecfe5a1815a6244 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/60217ca26d6c4706bc4f998ce4df4e04632607ededf903ac42f70a86fd91f47d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:31a361142cf1464556c00f591c988d773cb55e3cd15680aff192a342a2323cfc +size 13726 diff --git a/data/2025/2504_08xxx/2504.08813/images/6356fd790de32a30a73b6fbc40b822e226a13a741a8e1a8e4c327465cad11262.jpg b/data/2025/2504_08xxx/2504.08813/images/6356fd790de32a30a73b6fbc40b822e226a13a741a8e1a8e4c327465cad11262.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f53921793664ffaba13b6c980592908bfc6045ac --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/6356fd790de32a30a73b6fbc40b822e226a13a741a8e1a8e4c327465cad11262.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:88a3140493da88087709165641b56638a554ac85a956847522f9325a7d790a18 +size 12648 diff --git a/data/2025/2504_08xxx/2504.08813/images/65d048820b8a11abb71c8e4904d33e7f6a604b77a10e35895962f2dc0c344fad.jpg b/data/2025/2504_08xxx/2504.08813/images/65d048820b8a11abb71c8e4904d33e7f6a604b77a10e35895962f2dc0c344fad.jpg new file mode 100644 index 
0000000000000000000000000000000000000000..4b3daadcfb478c64594d3976e9fda95eed378f11 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/65d048820b8a11abb71c8e4904d33e7f6a604b77a10e35895962f2dc0c344fad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:08b6e38ee2a4352ab00dd7791439824d83c7aefadb5f1d3309cd0c24b3eaa6cd +size 12206 diff --git a/data/2025/2504_08xxx/2504.08813/images/6c4e282129807050626985bcb6911ef73c9400789bbd8f7d6f064b92e0437af5.jpg b/data/2025/2504_08xxx/2504.08813/images/6c4e282129807050626985bcb6911ef73c9400789bbd8f7d6f064b92e0437af5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1320a2883f3640fd985f3faf257dae3a3b9aab47 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/6c4e282129807050626985bcb6911ef73c9400789bbd8f7d6f064b92e0437af5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa99e2876355c30b8662ec784d16bbc672cffd85f5b7906587cab95831032c65 +size 3828 diff --git a/data/2025/2504_08xxx/2504.08813/images/73d5d2df663c13e44315c732c954870c81e665df6eee922f309b5ac748178580.jpg b/data/2025/2504_08xxx/2504.08813/images/73d5d2df663c13e44315c732c954870c81e665df6eee922f309b5ac748178580.jpg new file mode 100644 index 0000000000000000000000000000000000000000..027ead34b5ad5ab0d930d9072068f3ecefb0d4b6 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/73d5d2df663c13e44315c732c954870c81e665df6eee922f309b5ac748178580.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19b11c4a70b58e14b6f8bde4750818cec845ee251b1ab141902276162af2e595 +size 14136 diff --git a/data/2025/2504_08xxx/2504.08813/images/7a14c4ceaa3d3d387e95528365cd5045cb9fb35a9973efd78da0869bd0346884.jpg b/data/2025/2504_08xxx/2504.08813/images/7a14c4ceaa3d3d387e95528365cd5045cb9fb35a9973efd78da0869bd0346884.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5da74dbdd9b1876db091894aa3e7591747b54917 --- /dev/null +++ 
b/data/2025/2504_08xxx/2504.08813/images/7a14c4ceaa3d3d387e95528365cd5045cb9fb35a9973efd78da0869bd0346884.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c63d2f72e7e94aecb0d8c612377f74c2973bb7a5ee5622727a274c2ec5802e04 +size 4054 diff --git a/data/2025/2504_08xxx/2504.08813/images/91406f4d686d264c572bc7d1bb07d6ec7d28811e9c5b704367ea8ea288d1c7f4.jpg b/data/2025/2504_08xxx/2504.08813/images/91406f4d686d264c572bc7d1bb07d6ec7d28811e9c5b704367ea8ea288d1c7f4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c4cb6c130398de7d5c33b312f7c8d5eb51d56c46 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/91406f4d686d264c572bc7d1bb07d6ec7d28811e9c5b704367ea8ea288d1c7f4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5088793bcb7cf6f1a5b58ebcdad6f7ac2aaf7816c55ef4534c2e4f2de824a36d +size 6552 diff --git a/data/2025/2504_08xxx/2504.08813/images/96d0037c6b1f0c919e8f64e91c808aa868ab3b2d406f3824f52753c349cc06d0.jpg b/data/2025/2504_08xxx/2504.08813/images/96d0037c6b1f0c919e8f64e91c808aa868ab3b2d406f3824f52753c349cc06d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..914669bc494d9c9b0a255f7e48fc15a1d1590f03 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/96d0037c6b1f0c919e8f64e91c808aa868ab3b2d406f3824f52753c349cc06d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:063bb6e165505c06f06754c567ebd3262c14ecb2cf2366bda0a3564f675786f7 +size 13718 diff --git a/data/2025/2504_08xxx/2504.08813/images/98b5b441b4b4a884c9266ad7fc01d2d0e1969952264386748b5b2cf63ca2e5a6.jpg b/data/2025/2504_08xxx/2504.08813/images/98b5b441b4b4a884c9266ad7fc01d2d0e1969952264386748b5b2cf63ca2e5a6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fb5abbf9aa74ae978547ffd5add67c6e6a4bbd96 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/98b5b441b4b4a884c9266ad7fc01d2d0e1969952264386748b5b2cf63ca2e5a6.jpg @@ -0,0 +1,3 @@ +version 
https://git-lfs.github.com/spec/v1 +oid sha256:d62744d67cb41b9b7c43bb4a1f25cab1bba83a4471083155440643eb931fc0f8 +size 16847 diff --git a/data/2025/2504_08xxx/2504.08813/images/b96251a71647018544dd68520a8308c9c7cfc9cc7569ff38fe7cb1d03bdbe385.jpg b/data/2025/2504_08xxx/2504.08813/images/b96251a71647018544dd68520a8308c9c7cfc9cc7569ff38fe7cb1d03bdbe385.jpg new file mode 100644 index 0000000000000000000000000000000000000000..930cb29e2efa226ddc4d38030aa9f11f9106e247 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/b96251a71647018544dd68520a8308c9c7cfc9cc7569ff38fe7cb1d03bdbe385.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:81af28e576520d3e7fa134de84ca6572344579b1178033c0d4aa95eeff7bf82d +size 13275 diff --git a/data/2025/2504_08xxx/2504.08813/images/bc774ec0c51aac450e2ceec06bbe9b1f0ea58d88ce42c26e8fd04ed1ac78bfe8.jpg b/data/2025/2504_08xxx/2504.08813/images/bc774ec0c51aac450e2ceec06bbe9b1f0ea58d88ce42c26e8fd04ed1ac78bfe8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5062b153333ece6dc175280d0570c879679a33df --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/bc774ec0c51aac450e2ceec06bbe9b1f0ea58d88ce42c26e8fd04ed1ac78bfe8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2585f81f2688abc67229ddbdb9d48afe06c02c435adbbaf196af268fe0ec7f8f +size 4191 diff --git a/data/2025/2504_08xxx/2504.08813/images/c06dc7b23f1c52b1a3bd45fbef39d0292edf091479515efa9369e608e17ed8ca.jpg b/data/2025/2504_08xxx/2504.08813/images/c06dc7b23f1c52b1a3bd45fbef39d0292edf091479515efa9369e608e17ed8ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..023a51223b64a8410e2c86362a2cae24777256df --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/c06dc7b23f1c52b1a3bd45fbef39d0292edf091479515efa9369e608e17ed8ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e655845d7903ede9717e04d6614dc3db2c5d1fc332157bb3263246002990ba9c +size 276155 diff --git 
a/data/2025/2504_08xxx/2504.08813/images/dff095cff933feba5a38f3aee9fb251ef563c1d002268138fcfd04d14cdacb37.jpg b/data/2025/2504_08xxx/2504.08813/images/dff095cff933feba5a38f3aee9fb251ef563c1d002268138fcfd04d14cdacb37.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3404f7f8f274bec61fe71c647541ea9f719217e9 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/dff095cff933feba5a38f3aee9fb251ef563c1d002268138fcfd04d14cdacb37.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:79b70ad77b2ab677d79b59ba5204212e8ba13f846ca5ba84d6795fecc127b3a9 +size 21638 diff --git a/data/2025/2504_08xxx/2504.08813/images/e20ee32ac3bfeea3c5379d14e5dbc30e5c7bab73b7bd426f988e4f8b24d0ea6d.jpg b/data/2025/2504_08xxx/2504.08813/images/e20ee32ac3bfeea3c5379d14e5dbc30e5c7bab73b7bd426f988e4f8b24d0ea6d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ee2a9d77e9d2a63ae096b6759ce8dfa9cec4ac4 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/e20ee32ac3bfeea3c5379d14e5dbc30e5c7bab73b7bd426f988e4f8b24d0ea6d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f67ac9c4b3dd4e7fdd4b59727062abb3224412dba3e77ffd0339fd77597876da +size 20994 diff --git a/data/2025/2504_08xxx/2504.08813/images/f2de661e7d249e4c61df89f13c907384e6f0622209aab49011406c1b6d9ff010.jpg b/data/2025/2504_08xxx/2504.08813/images/f2de661e7d249e4c61df89f13c907384e6f0622209aab49011406c1b6d9ff010.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5169c64cf154ec66bf393119742cad5b64e737e0 --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/f2de661e7d249e4c61df89f13c907384e6f0622209aab49011406c1b6d9ff010.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b37d6131433cc16f1626de498ff9bb168553981c578c4e330180a50f9deb846f +size 44086 diff --git a/data/2025/2504_08xxx/2504.08813/images/feec6998f962b31d78f94fa1bbbcb11311f15d2b33513d67b19b41c78845006e.jpg 
b/data/2025/2504_08xxx/2504.08813/images/feec6998f962b31d78f94fa1bbbcb11311f15d2b33513d67b19b41c78845006e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..41d7204f8afdaba614e269604f5678136ad8b7ca --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/feec6998f962b31d78f94fa1bbbcb11311f15d2b33513d67b19b41c78845006e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49706aeb58270d9f947f06a6dc88c68630f85ac1759086fa5c783929d5ad991a +size 13678 diff --git a/data/2025/2504_08xxx/2504.08813/images/ffdc7b83ae60bf79a28deb1a926f46e94e6cc41907599645a0ec1bc82fd49706.jpg b/data/2025/2504_08xxx/2504.08813/images/ffdc7b83ae60bf79a28deb1a926f46e94e6cc41907599645a0ec1bc82fd49706.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c9c2923cd7394aaa64084410ef569012c5e5f39c --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/images/ffdc7b83ae60bf79a28deb1a926f46e94e6cc41907599645a0ec1bc82fd49706.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8418407631e265f76a368b2e83ae120359356b3669f0cf63d7ab1c5baf5ead9e +size 14914 diff --git a/data/2025/2504_08xxx/2504.08813/layout.json b/data/2025/2504_08xxx/2504.08813/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..514563bc1566d1fdba71bf35112dad42e4a7bf6b --- /dev/null +++ b/data/2025/2504_08xxx/2504.08813/layout.json @@ -0,0 +1,7136 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 123, + 96, + 487, + 137 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 96, + 487, + 137 + ], + "spans": [ + { + "bbox": [ + 123, + 96, + 487, + 137 + ], + "type": "text", + "content": "SafeMLRM: Demystifying Safety in Multi-modal Large Reasoning Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "spans": [ + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + 
"type": "text", + "content": "Junfeng Fang" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "content": ", Yukai Wang" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "content": ", Ruipeng Wang" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "content": ", Zijun Yao" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "content": ", Kun Wang" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "content": ", An Zhang" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "content": ", Xiang Wang" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{2*}" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "text", + "content": ", Tat-Seng Chua" + }, + { + "bbox": [ + 177, + 178, + 432, + 202 + ], + "type": "inline_equation", + "content": "^{1}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 137, + 202, + 474, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 137, + 202, + 474, + 236 + ], + "spans": [ + { + "bbox": [ + 137, + 202, + 474, + 236 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 137, + 202, + 474, + 236 + ], + "type": "text", + "content": "National University of Singapore, " + }, + { + "bbox": [ + 137, + 202, + 
474, + 236 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 137, + 202, + 474, + 236 + ], + "type": "text", + "content": "University of Science and Technology of China \n" + }, + { + "bbox": [ + 137, + 202, + 474, + 236 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 137, + 202, + 474, + 236 + ], + "type": "text", + "content": "Nanyang Technological University \nfangjf1997@gmail.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 281, + 264, + 329, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 264, + 329, + 277 + ], + "spans": [ + { + "bbox": [ + 281, + 264, + 329, + 277 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "spans": [ + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "text", + "content": "The rapid advancement of multi-modal large reasoning models (MLRMs) — enhanced versions of multimodal language models (MLLMs) equipped with reasoning capabilities — has revolutionized diverse applications. However, their safety implications remain underexplored. While prior work has exposed critical vulnerabilities in unimodal reasoning models, MLRMs introduce distinct risks from cross-modal reasoning pathways. This work presents the first systematic safety analysis of MLRMs through large-scale empirical studies comparing MLRMs with their base MLLMs. Our experiments reveal three critical findings: (1) The Reasoning Tax: Acquiring reasoning capabilities catastrophically degrades inherited safety alignment. MLRMs exhibit " + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "inline_equation", + "content": "37.44\\%" + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "text", + "content": " higher jailbreaking success rates than base MLLMs under adversarial attacks. 
(2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer " + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "inline_equation", + "content": "25\\times" + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "text", + "content": " higher attack rates — far exceeding the average " + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "inline_equation", + "content": "3.4\\times" + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "text", + "content": " increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. (3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction — " + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "inline_equation", + "content": "16.9\\%" + }, + { + "bbox": [ + 140, + 289, + 470, + 540 + ], + "type": "text", + "content": " of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. These findings underscore the urgency of scenario-aware safety auditing and mechanisms to amplify MLRMs' self-correction potential. To catalyze research, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for mainstream models, datasets, and jailbreaking methods. Our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 164, + 548, + 177, + 560 + ], + "blocks": [ + { + "bbox": [ + 164, + 548, + 177, + 560 + ], + "lines": [ + { + "bbox": [ + 164, + 548, + 177, + 560 + ], + "spans": [ + { + "bbox": [ + 164, + 548, + 177, + 560 + ], + "type": "image", + "image_path": "5baac71a839d0492466b5b3bea9347d1027212b1c74ece6233e117dac9f1c557.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "bbox": [ + 178, + 549, + 240, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 178, + 549, + 240, + 560 + ], + "spans": [ + { + "bbox": [ + 178, + 549, + 240, + 560 + ], + "type": "text", + "content": "OpenSafeMLRM" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 249, + 549, + 446, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 249, + 549, + 446, + 560 + ], + "spans": [ + { + "bbox": [ + 249, + 549, + 446, + 560 + ], + "type": "text", + "content": "https://github.com/fangjf1/OpenSafeMLRM" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 581, + 192, + 594 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 581, + 192, + 594 + ], + "spans": [ + { + "bbox": [ + 105, + 581, + 192, + 594 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 605, + 506, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 605, + 506, + 716 + ], + "spans": [ + { + "bbox": [ + 104, + 605, + 506, + 716 + ], + "type": "text", + "content": "With the rapid advancement of AI, large reasoning models (LRMs) like the DeepSeek series have gained significant attention [1, 2, 3, 4]. These models excel in performing complex tasks through meticulous reasoning, enabling transformative impacts across various downstream domains [5, 6]. Recently, this influence has expanded to multi-modal applications [7, 8, 9]. 
By embedding chain-of-thought data during the fine-tuning or reinforcement learning phrase of base multi-modal large language models (MLLMs) [10, 11, 12], researchers have developed Multi-modal Large Reasoning Models (MLRMs) [13, 14, 15, 16]. Compared to prompt-based multi-modal reasoning (e.g., instructing base model with \"Please think step-by-step\") [17, 18, 19, 20], MLRMs inherently encode reasoning process, demonstrating greater potential in alignment with human intentions; in contrast to asynchronous multi-modal reasoning paradigm (e.g., using one model to describe an" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "spans": [ + { + "bbox": [ + 14, + 213, + 37, + 555 + ], + "type": "text", + "content": "arXiv:2504.08813v1 [cs.LG] 9 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 723, + 313, + 734 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 723, + 313, + 734 + ], + "spans": [ + { + "bbox": [ + 117, + 723, + 313, + 734 + ], + "type": "text", + "content": "*Corresponding author: xiangwang1223@gmail.com." + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "image for an LRM to reason) [21, 22, 23, 24], MLRMs are end-to-end models rather than cascaded pipelines, avoiding compounding errors and modality-specific information loss. These advantages position MLRM as a cornerstone for future advancements in AI reasoning [25]." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 110, + 506, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 110, + 506, + 210 + ], + "spans": [ + { + "bbox": [ + 104, + 110, + 506, + 210 + ], + "type": "text", + "content": "However, alongside these reasoning advancements, model safety concerns have become increasingly critical [26, 27, 28], particularly the risks of unsafe content generation [29, 30]. Recent community efforts have evaluated the safety capabilities of mainstream LRMs like DeepSeek, especially their resilience to jailbreaking attacks [31, 32, 33]. These studies reveal urgent findings, demonstrating that exposure of reasoning processes amplifies safety risks, with attack success rates rise dramatically [34, 35]. While these findings motivate deeper investigations into LRM safety, the safety protocol of MLRMs remains underexplored. This gap demands imperative attention: multi-modal integration introduces novel attack vectors like cross-modal adversarial triggers, fundamentally expanding the threat surface beyond traditional text-only vulnerabilities [36, 37]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 214, + 506, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 214, + 506, + 315 + ], + "spans": [ + { + "bbox": [ + 104, + 214, + 506, + 315 + ], + "type": "text", + "content": "This study conducts the first systematic safety evaluation of advancing MLRMs. To catalyze this, we open-source OpenSafeMLRM, the first toolkit for MLRM safety evaluation, providing unified interface for various models, datasets, and jailbreaking methods. 
With OpenSafeMLRM, we evaluate mainstream MLRMs such as R1-Onevision [38], MM-Eureka-Qwen [14], Mulberry-Qwen2VL [39], Mulberry-Llava [39] and Mulberry-Llama [39] across 10 canonical unsafe scenarios curated from MM-SafetyBench [37], and further adapt black-box jailbreaking methods traditionally used for MLLMs [36] for probing MLRMs' safety resilience. All evaluations are applied to both target MLRMs and their base MLLMs. This comparative framework allows us to quantify how reasoning capability acquisition impacts the safety protocols." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "spans": [ + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "text", + "content": "Empirical results reveal several novel findings: (1) Reasoning Tax: The pursuit of advanced reasoning capabilities via supervised fine-tuning (SFT)/reinforcement learning (RL) imposes a steep safety cost — MLRMs exhibit " + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "inline_equation", + "content": "37.44\\%" + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "text", + "content": " higher attack success rates than their base MLLMs, with safety alignment catastrophically eroded during capability enhancement. 
(2) Safety Blind Spots: While safety degradation is pervasive, certain scenarios (e.g., Illegal Activity) suffer " + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "inline_equation", + "content": "25\\times" + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "text", + "content": " higher attack rates — far exceeding the average " + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "inline_equation", + "content": "3.4\\times" + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "text", + "content": " increase, revealing scenario-specific vulnerabilities with alarming cross-model and datasets consistency. (3) Emergent Self-Correction: Despite tight reasoning-answer safety coupling, MLRMs demonstrate nascent self-correction: " + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "inline_equation", + "content": "16.23\\%" + }, + { + "bbox": [ + 103, + 318, + 504, + 417 + ], + "type": "text", + "content": " of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 506, + 510 + ], + "type": "text", + "content": "While our evaluation framework conduct comprehensive experiments, we acknowledge that selection bias in test samples and evaluation criteria design may inevitably introduce measurement distortions. To mitigate this, we will refine this evaluation framework by incorporating additional datasets, models, attack vectors, and defense baselines [36] to enhance its comprehensiveness and reliability. These findings reveal fundamental gaps in MLRMs' current safety mechanisms and expose the alarming efficacy of adversarial strategies in bypassing their safeguards. 
In summary, our work calls for immediate efforts to harden reasoning-augmented AI, ensuring its transformative potential aligns with ethical safeguards." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 529, + 229, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 229, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 229, + 544 + ], + "type": "text", + "content": "2 Experimental Setup" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 555, + 504, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 504, + 589 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 504, + 589 + ], + "type": "text", + "content": "This work aims to systematically investigate the safety of MLRMs and their safety degradation compared to base MLLMs. Here, we formalize the research aim of this paper and provided specific experimental configurations, including datasets, baseline models and evaluation metrics." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 600, + 181, + 611 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 600, + 181, + 611 + ], + "spans": [ + { + "bbox": [ + 121, + 600, + 181, + 611 + ], + "type": "text", + "content": "Research Aim" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 120, + 619, + 491, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 619, + 491, + 654 + ], + "spans": [ + { + "bbox": [ + 120, + 619, + 491, + 654 + ], + "type": "text", + "content": "Our aim is to explore the safety vulnerabilities in MLRMs. Through comprehensive analysis across various unsafe scenarios, we highlight key risks and compare the safety performance of MLRMs against their base MLLMs, providing valuable insights for enhancing their safety." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 677, + 276, + 690 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 677, + 276, + 690 + ], + "spans": [ + { + "bbox": [ + 105, + 677, + 276, + 690 + ], + "type": "text", + "content": "2.1 Datasets & Jailbreaking Methods." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "We evaluate our framework on two widely adopted benchmarks for MLLM safety: MM-SafetyBench [37] and SafetyBench [40]." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 70, + 272, + 289 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 272, + 289 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 272, + 289 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 272, + 289 + ], + "type": "image", + "image_path": "2fca32aaf7e8dcca1bb4035fe1b80f62456b537f4e8f4441fc1dd307ddd4e2bf.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 327 + ], + "type": "text", + "content": "Figure 1: Comparison of ASR and HR scores across various MLRMs and their base MLLMs under vanilla unsafe text queries. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 276, + 76, + 369, + 288 + ], + "blocks": [ + { + "bbox": [ + 276, + 76, + 369, + 288 + ], + "lines": [ + { + "bbox": [ + 276, + 76, + 369, + 288 + ], + "spans": [ + { + "bbox": [ + 276, + 76, + 369, + 288 + ], + "type": "image", + "image_path": "524be737f917bffa0d7a4defad3e62688344660ebd82d49f19e779fc51fd4ef3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 373, + 76, + 424, + 288 + ], + "blocks": [ + { + "bbox": [ + 373, + 76, + 424, + 288 + ], + "lines": [ + { + "bbox": [ + 373, + 76, + 424, + 288 + ], + "spans": [ + { + "bbox": [ + 373, + 76, + 424, + 288 + ], + "type": "image", + "image_path": "73d5d2df663c13e44315c732c954870c81e665df6eee922f309b5ac748178580.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 428, + 76, + 498, + 288 + ], + "blocks": [ + { + "bbox": [ + 428, + 76, + 498, + 288 + ], + "lines": [ + { + "bbox": [ + 428, + 76, + 498, + 288 + ], + "spans": [ + { + "bbox": [ + 428, + 76, + 498, + 288 + ], + "type": "image", + "image_path": "e20ee32ac3bfeea3c5379d14e5dbc30e5c7bab73b7bd426f988e4f8b24d0ea6d.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 335, + 506, + 489 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 335, + 504, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 335, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 105, + 335, + 504, + 422 + ], + "type": "text", + "content": "- MM-SafetyBench comprises 13 safety-critical scenarios (e.g., illegal activities, hate speech) that MLLMs should strictly prohibit. 
Constructed via the QueryRelated pipeline [37], this dataset first generates malicious queries across scenarios, then uses GPT-4 [41] to extract unsafe keywords and synthesize three image types: (1) Stable Diffusion (SD) [42] images: Visually aligned with unsafe keywords. (2) Text-overlaid images: Unsafe text superimposed on benign visuals. (3) Hybrid images: SD-generated visuals combined with adversarial typography. Following [37], we adopt hybrid images (empirically shown to be the most effective jailbreaking method among the three) as the jailbreaking method for evaluation." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 423, + 506, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 423, + 506, + 489 + ], + "spans": [ + { + "bbox": [ + 104, + 423, + 506, + 489 + ], + "type": "text", + "content": "- SafetyBench covers 10 prohibited topics curated from the OpenAI and Meta's Llama-2 [43] usage policies. Built via the FigStep pipeline [40], it leverages GPT-4 [41] to rewrite queries into instructional formats (e.g., \"Steps to manufacture illegal drugs\"), enumerates them as numbered lists, and converts these into typographic images. These images are then fed to target models to complete missing steps — a second jailbreaking paradigm in our evaluation. To ensure cross-benchmark consistency, we retain the 10 overlapping safety topics between two datasets." 
+ } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 510, + 205, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 510, + 205, + 521 + ], + "spans": [ + { + "bbox": [ + 105, + 510, + 205, + 521 + ], + "type": "text", + "content": "2.2 Baseline MLRMs" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 534, + 506, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 534, + 506, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 534, + 506, + 601 + ], + "type": "text", + "content": "We evaluate five prominent open-source MLRMs spanning diverse base architectures and parameter scales: R1-OneVision [38], MM-EUREKA-Qwen [14], Mulberry-Qwen2-VL [39], Mulberry-LLaVA [39] and Mulberry-LlaMA [39]. These models exhibit R1-style reasoning capabilities through SFT and RL with human feedback alignment. Specifically, they generate intermediate reasoning steps before producing final answers, enhancing performance on complex multi-modal tasks but simultaneously amplifies safety risks due to extended reasoning pathways." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 621, + 212, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 621, + 212, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 621, + 212, + 632 + ], + "type": "text", + "content": "2.3 Evaluation Metrics" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 645, + 505, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 505, + 722 + ], + "type": "text", + "content": "We selected two evaluation metrics. The first is Harmfulness Rating (HR), which employs an additional LLM to assess the level of harm in the generated content. 
Following [44], we adopted a two-step evaluation to calculate HR: Initially, the output undergoes a substring lookup; if the model's response contains phrases like \"Sorry, I can not\", the HR is immediately set to 0. Next, a detailed prompt with specific scoring rules is designed to instruct GPT-4o-mini [45] to evaluate the harmfulness of the model's output according to the rules, using a 0-5 scale where 0 indicates no harmfulness, and 5 indicates extremely harmful content." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 70, + 274, + 287 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 274, + 287 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 274, + 287 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 274, + 287 + ], + "type": "image", + "image_path": "f2de661e7d249e4c61df89f13c907384e6f0622209aab49011406c1b6d9ff010.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "lines": [ + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "spans": [ + { + "bbox": [ + 104, + 293, + 504, + 316 + ], + "type": "text", + "content": "Figure 2: Comparison of ASR and HR scores across MLRMs and their base MLLMs under jailbreak attacks. The dark blue bars represent the ASR and HR of the base MLLMs. Best viewed in color." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 276, + 76, + 369, + 286 + ], + "blocks": [ + { + "bbox": [ + 276, + 76, + 369, + 286 + ], + "lines": [ + { + "bbox": [ + 276, + 76, + 369, + 286 + ], + "spans": [ + { + "bbox": [ + 276, + 76, + 369, + 286 + ], + "type": "image", + "image_path": "3b9bca542edeca217510c4d7e8e53b59bf563b76e85a29e386e4382336688c4d.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 373, + 76, + 424, + 286 + ], + "blocks": [ + { + "bbox": [ + 373, + 76, + 424, + 286 + ], + "lines": [ + { + "bbox": [ + 373, + 76, + 424, + 286 + ], + "spans": [ + { + "bbox": [ + 373, + 76, + 424, + 286 + ], + "type": "image", + "image_path": "1fbee6b1ef5f8c6c74b3dc102f6621c06ea51843f6bf84e089618fe23a75e04a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 428, + 75, + 498, + 286 + ], + "blocks": [ + { + "bbox": [ + 428, + 75, + 498, + 286 + ], + "lines": [ + { + "bbox": [ + 428, + 75, + 498, + 286 + ], + "spans": [ + { + "bbox": [ + 428, + 75, + 498, + 286 + ], + "type": "image", + "image_path": "dff095cff933feba5a38f3aee9fb251ef563c1d002268138fcfd04d14cdacb37.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "spans": [ + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "content": "The second metric is Attack Success Rate (ASR), defined as the number of successful attacks divided by the total number of attempted attacks. 
Formally, for a particular instruction dataset " + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "inline_equation", + "content": "D" + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "content": ", we define " + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "inline_equation", + "content": "ASR = I(Q_i) / |D|" + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "inline_equation", + "content": "Q_i" + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "content": " represents the set of text and image queries, and " + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "inline_equation", + "content": "I(\\cdot)" + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "content": " is an indicator function such that " + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "inline_equation", + "content": "I(\\cdot) = 1" + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "content": " if the response engages with the malicious query, and " + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "inline_equation", + "content": "I(\\cdot) = 0" + }, + { + "bbox": [ + 104, + 324, + 504, + 413 + ], + "type": "text", + "content": " otherwise. ASR is a common metric for evaluating model safety and robustness. In our experiments, we addressed a potential issue: directly using the LLM as a judge to determine if the model output has been \"jailbroken\" introduces considerable uncertainty. Therefore, based on the detailed guidelines in the aforementioned prompt, we define a jailbreak as successful when the HR score equals 4 or 5." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 422, + 504, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 422, + 504, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 422, + 504, + 499 + ], + "type": "text", + "content": "Evaluation Dimensions: As there has been no comprehensive evaluation specifically targeting MLRMs, we define two key dimensions for assessment: Result Safety and Overall Safety. These are equally important, as different LRM models may present outcomes in varying formats — either as raw results or as reasoning alongside the results. In the following sections, we will exhibit and analyze the metrics for these two dimensions. Note that while we also report metrics related to Reasoning Safety for investigating the relationship between the safety of the reasoning process and the final output, we consider this secondary to the two primary dimensions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 519, + 274, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 519, + 274, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 519, + 274, + 533 + ], + "type": "text", + "content": "3 Safety Inspection of MLRMs" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 547, + 194, + 559 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 547, + 194, + 559 + ], + "spans": [ + { + "bbox": [ + 105, + 547, + 194, + 559 + ], + "type": "text", + "content": "3.1 Reasoning Tax" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 569, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 569, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 569, + 504, + 624 + ], + "type": "text", + "content": "Prior work has observed the \"safety tax\" in reasoning models, i.e., performance deterioration caused by safety alignment [35]. 
We identify a critical counterpart: the reasoning tax, defined as the safety degradation incurred when augmenting MLLMs with reasoning capabilities through SFT or RL. To quantify this phenomenon, we systematically compare safety metrics between four safety-aligned base MLLMs and five MLRMs derived from them." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 723 + ], + "type": "text", + "content": "Experimental Protocol. To isolate the impact of reasoning capability acquisition, we adopt a two-stage evaluation: For base MLLMs, we directly assess the outputs for HR and ASR, as these models lack explicit reasoning step generation. For MLRMs, we collect HR and ASR across Think, Answer and Overall processes. Note that although we employed a standard prompt template to enforce the separation of the reasoning and answer processes, some MLRMs (e.g., Mulberry-LlaMA) still struggled to strictly follow the \"Think + Answer\" format in their outputs. For these models, we report their overall safety solely. Figure 1 and 2 exhibit the results under vanilla unsafe text queries and jailbreak attacks, respectively." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 110, + 504, + 521 + ], + "blocks": [ + { + "bbox": [ + 105, + 77, + 504, + 110 + ], + "lines": [ + { + "bbox": [ + 105, + 77, + 504, + 110 + ], + "spans": [ + { + "bbox": [ + 105, + 77, + 504, + 110 + ], + "type": "text", + "content": "Table 1: Comparison of HR scores on MM-SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the \"Model\" column represents the base MLLMs." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 111, + 110, + 504, + 521 + ], + "lines": [ + { + "bbox": [ + 111, + 110, + 504, + 521 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 504, + 521 + ], + "type": "table", + "html": "
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL[46]-Vani.Overall0.040.280.841.210.301.850.682.674.043.27
Jail.Overall2.482.734.253.833.404.113.683.154.224.18
R1-OV [38]Vani.Think3.102.972.633.493.403.673.103.453.624.23
Answer3.553.293.234.193.924.513.204.104.364.32
Overall3.033.072.703.993.484.403.183.724.354.28
Jail.Think4.123.624.274.354.143.904.123.203.844.20
Answer4.363.864.664.594.514.154.543.714.034.25
Overall4.313.914.614.604.374.324.333.504.224.21
Eureka-QW [14]Vani.Think1.951.973.203.452.704.002.903.333.933.54
Answer2.031.873.343.112.294.152.553.354.043.62
Overall2.232.013.363.532.654.222.973.233.993.90
Jail.Think3.403.124.004.243.903.913.793.194.013.74
Answer2.893.254.304.033.663.823.673.083.913.71
Overall3.393.454.254.243.974.054.013.254.113.91
QW2-VL [47]-Vani.Overall0.050.050.550.790.141.750.361.853.822.25
Jail.Overall3.663.294.344.224.274.394.063.104.053.88
MBerry-QW [39]Vani.Think2.122.342.573.063.122.962.632.573.282.83
Answer2.452.473.323.442.623.953.063.353.883.61
Overall2.502.443.393.552.754.103.033.654.083.82
Jail.Think3.272.873.023.583.452.973.472.382.492.91
Answer4.414.074.484.474.444.084.323.393.863.86
Overall4.484.194.364.574.604.334.453.554.083.99
LMA3-LVA [48]-Vani.Overall0.150.501.451.920.582.450.912.074.072.64
Jail.Overall3.773.154.054.224.053.763.963.323.803.85
MBerry-LVA [39]Vani.Overall1.953.364.413.393.743.802.603.323.993.55
Jail.Overall3.853.523.554.033.773.783.893.253.443.79
LMA-3.2-Vision [43]-Vani.Overall0.100.732.071.760.624.431.093.734.423.63
Jail.Overall0.761.662.181.961.883.592.373.044.173.84
MBerry-LMA [39]Vani.Overall0.100.641.111.310.623.000.763.834.294.23
Jail.Overall3.383.182.983.583.113.653.423.513.564.07
", + "image_path": "327d55b58bbda774ffb1eec6ecd34158a8773e8c4f874654e783879ab6338cd3.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 529, + 504, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 529, + 504, + 541 + ], + "spans": [ + { + "bbox": [ + 105, + 529, + 504, + 541 + ], + "type": "text", + "content": "Key Findings. Figures 1 and 2 reveal systematic safety degradation across all MLRMs. Specifically," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 544, + 504, + 687 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "spans": [ + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "text", + "content": "- Overall Safety Collapse: MLRMs exhibit " + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "inline_equation", + "content": "31.30\\%" + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "text", + "content": " higher ASR (" + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "inline_equation", + "content": "59.52\\%" + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "text", + "content": " vs. base MLLMs' " + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "inline_equation", + "content": "28.22\\%" + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "text", + "content": ") and 1.64 higher HR (" + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "inline_equation", + "content": "3.07\\%" + }, + { + "bbox": [ + 105, + 544, + 504, + 577 + ], + "type": "text", + "content": " vs. 1.43), demonstrating that exposing reasoning chains fundamentally expands attack surfaces." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 578, + 504, + 610 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 578, + 504, + 610 + ], + "spans": [ + { + "bbox": [ + 105, + 578, + 504, + 610 + ], + "type": "text", + "content": "- Persistent Answer Degradation: Even when ignoring unsafe reasoning steps, MLRM answers show " + }, + { + "bbox": [ + 105, + 578, + 504, + 610 + ], + "type": "inline_equation", + "content": "25.89\\%" + }, + { + "bbox": [ + 105, + 578, + 504, + 610 + ], + "type": "text", + "content": " higher ASR and 1.30 higher HR compared to base models. This proves safety erosion persists beyond reasoning exposure, suggesting SFT/RL alignment damages intrinsic safeguards." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 611, + 504, + 665 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 611, + 504, + 665 + ], + "spans": [ + { + "bbox": [ + 105, + 611, + 504, + 665 + ], + "type": "text", + "content": "- Architectural Vulnerability: Qwen2.5-based MLRMs suffer catastrophic safety loss (ASR + 40.06%, HR + 2.02), suggesting architectural incompatibility between reasoning and safety mechanisms. Notably, MBerry-LMA exhibits an inverse trend: its safety metrics improve post-reasoning augmentation (ASR - 4.8%, HR - 0.21). We believe this anomaly hints at potential safeguards, and may provide a rare blueprint for designing safety-resilient reasoning models." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 666, + 504, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 504, + 687 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 504, + 687 + ], + "type": "text", + "content": "- Reasoning as Attack Vector: Unsafe reasoning occurs " + }, + { + "bbox": [ + 105, + 666, + 504, + 687 + ], + "type": "inline_equation", + "content": "12.52\\%" + }, + { + "bbox": [ + 105, + 666, + 504, + 687 + ], + "type": "text", + "content": " more frequently than unsafe answers, highlighting systemic risks in exposing intermediate reasoning to users." + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 111, + 111, + 504, + 520 + ], + "blocks": [ + { + "bbox": [ + 104, + 77, + 504, + 110 + ], + "lines": [ + { + "bbox": [ + 104, + 77, + 504, + 110 + ], + "spans": [ + { + "bbox": [ + 104, + 77, + 504, + 110 + ], + "type": "text", + "content": "Table 2: Comparison of HR scores on SafetyBench datasets across various MLRMs and their base MLLMs. For abbreviations, Vani. and Jail. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. The dash in the \"Model\" column represents the base MLLMs." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 111, + 111, + 504, + 520 + ], + "lines": [ + { + "bbox": [ + 111, + 111, + 504, + 520 + ], + "spans": [ + { + "bbox": [ + 111, + 111, + 504, + 520 + ], + "type": "table", + "html": "
BaseModelMethodStageIAHSRAPHFRPOPVLOFAHC
QW2.5-VL [46]-Vani.Overall0.220.420.000.180.501.960.743.943.923.70
Jail.Overall2.782.684.363.044.084.043.143.564.323.62
R1-OV [38]Vani.Think3.302.574.122.943.484.173.103.404.003.22
Answer4.003.193.983.433.944.593.393.763.944.18
Overall3.442.663.843.143.664.302.983.224.063.64
Jail.Think4.123.794.864.584.444.204.523.403.804.34
Answer4.673.884.914.604.514.384.583.673.874.02
Overall4.183.784.884.624.544.484.523.583.883.98
Eureka-QW [14]Vani.Think2.891.613.262.142.184.042.283.303.903.54
Answer2.501.583.021.941.864.142.123.143.643.50
Overall2.501.503.041.901.844.002.303.303.543.28
Jail.Think3.142.843.903.343.464.183.503.383.783.62
Answer3.262.984.123.603.604.143.663.263.723.36
Overall3.322.83.983.523.584.183.543.523.863.60
QW2-VL [47]-Vani.Overall0.280.340.280.060.062.240.763.083.923.68
Jail.Overall3.302.744.823.344.404.483.824.044.284.40
MBerry-QW [39]Vani.Think2.722.423.142.392.623.372.622.713.243.33
Answer2.962.473.452.682.964.102.843.803.653.92
Overall2.882.303.422.463.064.162.683.723.663.86
Jail.Think4.153.954.484.104.314.364.163.733.893.82
Answer4.464.084.804.384.564.664.403.564.004.20
Overall4.694.244.884.424.644.524.463.464.004.18
LMA3-LVA [48]-Vani.Overall0.660.842.260.861.082.601.203.323.863.58
Jail.Overall4.223.064.484.364.243.764.043.323.803.85
MBerry-LVA [39]Vani.Overall1.901.463.681.523.283.482.223.624.043.64
Jail.Overall4.383.564.424.083.944.043.843.383.383.58
LMA-3.2-Vision [43]-Vani.Overall1.781.361.200.721.684.301.763.904.404.00
Jail.Overall2.923.304.263.863.644.223.583.523.983.96
MBerry-LMA [39]Vani.Overall1.180.721.700.881.003.181.123.844.243.64
Jail.Overall3.964.044.744.524.484.463.943.903.984.20
", + "image_path": "c06dc7b23f1c52b1a3bd45fbef39d0292edf091479515efa9369e608e17ed8ca.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 121, + 528, + 290, + 540 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 528, + 290, + 540 + ], + "spans": [ + { + "bbox": [ + 121, + 528, + 290, + 540 + ], + "type": "text", + "content": "Takeaway 3.1: Reasoning Tax in MLRMs" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 120, + 548, + 490, + 572 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 548, + 490, + 572 + ], + "spans": [ + { + "bbox": [ + 120, + 548, + 490, + 572 + ], + "type": "text", + "content": "SFT/RL-based reasoning acquisition severely compromises safety alignment of base MLLMs, a phenomenon we term the \"reasoning tax\" in MLRMs." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 601, + 208, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 601, + 208, + 613 + ], + "spans": [ + { + "bbox": [ + 105, + 601, + 208, + 613 + ], + "type": "text", + "content": "3.2 Safety Blind Spots" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 624, + 504, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 504, + 668 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 504, + 668 + ], + "type": "text", + "content": "We conduct fine-grained analysis to uncover safety blind spots—scenarios where MLRMs exhibit catastrophic safety failures despite base MLLMs achieving near-perfect alignment. We ask: (1) Do MLRMs inherit base models' scenario-specific safety profiles? (2) Does the reasoning tax manifest heterogeneously across scenarios?" 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "Experimental Protocol. Following [37], we evaluate 10 safety-critical scenarios: Illegal Activity, Hate Speech, Malware Generation, Physical Harm, Fraud, Pornography, Privacy Violence, Legal Opinion, Financial Advice, and Consultation. Comparison of HR scores across various scenarios on MM-SafetyBench and SafetyBench datasets are exhibited in Table 1 and 2, respectively." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 106, + 70, + 208, + 171 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 208, + 171 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 208, + 171 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 208, + 171 + ], + "type": "image", + "image_path": "60217ca26d6c4706bc4f998ce4df4e04632607ededf903ac42f70a86fd91f47d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 215, + 71, + 303, + 171 + ], + "blocks": [ + { + "bbox": [ + 215, + 71, + 303, + 171 + ], + "lines": [ + { + "bbox": [ + 215, + 71, + 303, + 171 + ], + "spans": [ + { + "bbox": [ + 215, + 71, + 303, + 171 + ], + "type": "image", + "image_path": "53e2c0898fa6c870825288302fce6eecd6766b8ee84334a014b1d11362a0eb5f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ 
+ 310, + 72, + 399, + 171 + ], + "blocks": [ + { + "bbox": [ + 310, + 72, + 399, + 171 + ], + "lines": [ + { + "bbox": [ + 310, + 72, + 399, + 171 + ], + "spans": [ + { + "bbox": [ + 310, + 72, + 399, + 171 + ], + "type": "image", + "image_path": "57a25f7f7f4c6c142c2394afd0944b8dcc8625bffd742589d64785330fa33d91.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 406, + 72, + 503, + 171 + ], + "blocks": [ + { + "bbox": [ + 406, + 72, + 503, + 171 + ], + "lines": [ + { + "bbox": [ + 406, + 72, + 503, + 171 + ], + "spans": [ + { + "bbox": [ + 406, + 72, + 503, + 171 + ], + "type": "image", + "image_path": "96d0037c6b1f0c919e8f64e91c808aa868ab3b2d406f3824f52753c349cc06d0.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 106, + 177, + 209, + 195 + ], + "blocks": [ + { + "bbox": [ + 106, + 177, + 209, + 195 + ], + "lines": [ + { + "bbox": [ + 106, + 177, + 209, + 195 + ], + "spans": [ + { + "bbox": [ + 106, + 177, + 209, + 195 + ], + "type": "image", + "image_path": "574b87aa30ef2437f88de2908076b3e19b49865518778ec5150bd86129cf9bef.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 213, + 177, + 306, + 195 + ], + "blocks": [ + { + "bbox": [ + 213, + 177, + 306, + 195 + ], + "lines": [ + { + "bbox": [ + 213, + 177, + 306, + 195 + ], + "spans": [ + { + "bbox": [ + 213, + 177, + 306, + 195 + ], + "type": "image", + "image_path": "bc774ec0c51aac450e2ceec06bbe9b1f0ea58d88ce42c26e8fd04ed1ac78bfe8.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 310, + 177, + 397, + 195 + ], + "blocks": [ + { + "bbox": [ + 310, + 177, + 397, + 195 + ], + "lines": [ + { + "bbox": [ + 310, + 177, + 397, + 195 + ], + "spans": [ + { + "bbox": [ + 310, + 177, + 
397, + 195 + ], + "type": "image", + "image_path": "04693c91aafe664ff917e72897465625489f7fd357aabc415b90861cb283f963.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 401, + 170, + 502, + 195 + ], + "blocks": [ + { + "bbox": [ + 401, + 170, + 502, + 195 + ], + "lines": [ + { + "bbox": [ + 401, + 170, + 502, + 195 + ], + "spans": [ + { + "bbox": [ + 401, + 170, + 502, + 195 + ], + "type": "image", + "image_path": "4cdb68a77c41887d4e526d090e30fd1a2ff06614cb2e14b5073428ed1d868612.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 106, + 195, + 207, + 284 + ], + "blocks": [ + { + "bbox": [ + 106, + 195, + 207, + 284 + ], + "lines": [ + { + "bbox": [ + 106, + 195, + 207, + 284 + ], + "spans": [ + { + "bbox": [ + 106, + 195, + 207, + 284 + ], + "type": "image", + "image_path": "6356fd790de32a30a73b6fbc40b822e226a13a741a8e1a8e4c327465cad11262.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 213, + 195, + 304, + 284 + ], + "blocks": [ + { + "bbox": [ + 213, + 195, + 304, + 284 + ], + "lines": [ + { + "bbox": [ + 213, + 195, + 304, + 284 + ], + "spans": [ + { + "bbox": [ + 213, + 195, + 304, + 284 + ], + "type": "image", + "image_path": "55de58692bb2a404a8a856a334e4abff7819345d1afe93f4fd164a052c994d15.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 310, + 195, + 399, + 284 + ], + "blocks": [ + { + "bbox": [ + 310, + 195, + 399, + 284 + ], + "lines": [ + { + "bbox": [ + 310, + 195, + 399, + 284 + ], + "spans": [ + { + "bbox": [ + 310, + 195, + 399, + 284 + ], + "type": "image", + "image_path": "149faa88859aa02e53597cc89337335a33b638b96b121b487ef6244ed69951a5.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" 
+ } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 401, + 195, + 503, + 284 + ], + "blocks": [ + { + "bbox": [ + 401, + 195, + 503, + 284 + ], + "lines": [ + { + "bbox": [ + 401, + 195, + 503, + 284 + ], + "spans": [ + { + "bbox": [ + 401, + 195, + 503, + 284 + ], + "type": "image", + "image_path": "65d048820b8a11abb71c8e4904d33e7f6a604b77a10e35895962f2dc0c344fad.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 113, + 290, + 209, + 308 + ], + "blocks": [ + { + "bbox": [ + 113, + 290, + 209, + 308 + ], + "lines": [ + { + "bbox": [ + 113, + 290, + 209, + 308 + ], + "spans": [ + { + "bbox": [ + 113, + 290, + 209, + 308 + ], + "type": "image", + "image_path": "7a14c4ceaa3d3d387e95528365cd5045cb9fb35a9973efd78da0869bd0346884.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 313, + 506, + 348 + ], + "lines": [ + { + "bbox": [ + 104, + 313, + 506, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 313, + 506, + 348 + ], + "type": "text", + "content": "Figure 3: Comparison of ASR scores across different MLRMs and their base MLLMs. For abbreviations, va. and ja. refer to performance under vanilla unsafe text queries and jailbreak attacks, respectively. We use MB to denote MLRM that are developed with MBerry method." 
+ } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 212, + 290, + 306, + 308 + ], + "blocks": [ + { + "bbox": [ + 212, + 290, + 306, + 308 + ], + "lines": [ + { + "bbox": [ + 212, + 290, + 306, + 308 + ], + "spans": [ + { + "bbox": [ + 212, + 290, + 306, + 308 + ], + "type": "image", + "image_path": "06be97e39e60553cc8f956de76996a567c71d1a7d7613291a13fc8c3397cbf66.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 312, + 290, + 397, + 308 + ], + "blocks": [ + { + "bbox": [ + 312, + 290, + 397, + 308 + ], + "lines": [ + { + "bbox": [ + 312, + 290, + 397, + 308 + ], + "spans": [ + { + "bbox": [ + 312, + 290, + 397, + 308 + ], + "type": "image", + "image_path": "6c4e282129807050626985bcb6911ef73c9400789bbd8f7d6f064b92e0437af5.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 401, + 282, + 502, + 308 + ], + "blocks": [ + { + "bbox": [ + 401, + 282, + 502, + 308 + ], + "lines": [ + { + "bbox": [ + 401, + 282, + 502, + 308 + ], + "spans": [ + { + "bbox": [ + 401, + 282, + 502, + 308 + ], + "type": "image", + "image_path": "91406f4d686d264c572bc7d1bb07d6ec7d28811e9c5b704367ea8ea288d1c7f4.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 355, + 504, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 355, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 355, + 504, + 378 + ], + "type": "text", + "content": "Additionally, we provide a more intuitive display for ASR in the form of a radar chart, as shown in Figure 3." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "spans": [ + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "content": "Key Findings. Experimental results reveal alarming safety blind spots. For instance, in I11legal Activity scenario, MLRMs show " + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "inline_equation", + "content": "3.79 \\times" + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "content": " higher ASR than base MLLMs on average. Furthermore, while the base MLLM Qwen2.5-VL achieves near-perfect safety (ASR " + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "inline_equation", + "content": "< 3\\%" + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "content": "), its MLRM derivative R1-Onevision suffers catastrophic failure (ASR " + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "inline_equation", + "content": "> 50\\%" + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "content": "), achieving near " + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "inline_equation", + "content": "25 \\times" + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "content": " degradation. This exposes catastrophic alignment erosion in reasoning-enhanced architectures. 
Overall, safety degradation varies dramatically across scenarios (" + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "inline_equation", + "content": "\\Delta" + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "content": "ASR range: " + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "inline_equation", + "content": "8.1\\% - 2500\\%" + }, + { + "bbox": [ + 104, + 387, + 506, + 477 + ], + "type": "text", + "content": "), with I11legal Activity/Pornography being most/least affected scenarios, demanding urgent scenario-specific red teaming and adaptive alignment protocols for MLRMs." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 121, + 490, + 259, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 490, + 259, + 502 + ], + "spans": [ + { + "bbox": [ + 121, + 490, + 259, + 502 + ], + "type": "text", + "content": "Takeaway 3.2: Safety Blind Spots" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 120, + 510, + 490, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 510, + 490, + 536 + ], + "spans": [ + { + "bbox": [ + 120, + 510, + 490, + 536 + ], + "type": "text", + "content": "MLRMs introduce critical safety blind spots — scenarios where base MLLMs excel (e.g., Illegal Activity) become catastrophic vulnerabilities post-reasoning augmentation." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 105, + 568, + 241, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 568, + 241, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 568, + 241, + 580 + ], + "type": "text", + "content": "3.3 Emergent Self-Correction" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 104, + 591, + 506, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 591, + 506, + 625 + ], + "spans": [ + { + "bbox": [ + 104, + 591, + 506, + 625 + ], + "type": "text", + "content": "In this section, we aim to analyze the relationship between thinking safety and answer safety. This helps uncover potential correlations in the safety resilience across different stages of MLRM outputs, providing insights for constructing hierarchical defense strategies." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 634, + 506, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 634, + 506, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 634, + 506, + 691 + ], + "type": "text", + "content": "Experimental Protocol. We select three MLRMs (i.e., R1-OneVision, MM-EUREKA-Qwen, Mulberry-Qwen), all strictly following \"Think + Answer\" output formats. From the 10 safety-critical scenarios, we randomly sample 100 adversarial queries per scenario, forming a 1000-sample test set. For each query, we compute Think-HR and Answer-HR under jailbreaking and visualize their normalized joint distribution via 2D heatmaps, as exhibited in Figure 4." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 699, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 699, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 699, + 506, + 723 + ], + "type": "text", + "content": "Key Findings. Figure 4 reveals strong symmetric coupling between Think-HR and Answer-HR. 
Specifically," + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 76, + 218, + 191 + ], + "blocks": [ + { + "bbox": [ + 108, + 76, + 218, + 191 + ], + "lines": [ + { + "bbox": [ + 108, + 76, + 218, + 191 + ], + "spans": [ + { + "bbox": [ + 108, + 76, + 218, + 191 + ], + "type": "image", + "image_path": "0f6b250de89a5323a9368b9e1db8598f144284be977a86440193d4c19b46cf1a.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 233, + 76, + 343, + 191 + ], + "blocks": [ + { + "bbox": [ + 233, + 76, + 343, + 191 + ], + "lines": [ + { + "bbox": [ + 233, + 76, + 343, + 191 + ], + "spans": [ + { + "bbox": [ + 233, + 76, + 343, + 191 + ], + "type": "image", + "image_path": "b96251a71647018544dd68520a8308c9c7cfc9cc7569ff38fe7cb1d03bdbe385.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 360, + 75, + 503, + 191 + ], + "blocks": [ + { + "bbox": [ + 360, + 75, + 503, + 191 + ], + "lines": [ + { + "bbox": [ + 360, + 75, + 503, + 191 + ], + "spans": [ + { + "bbox": [ + 360, + 75, + 503, + 191 + ], + "type": "image", + "image_path": "98b5b441b4b4a884c9266ad7fc01d2d0e1969952264386748b5b2cf63ca2e5a6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 108, + 205, + 218, + 320 + ], + "blocks": [ + { + "bbox": [ + 108, + 205, + 218, + 320 + ], + "lines": [ + { + "bbox": [ + 108, + 205, + 218, + 320 + ], + "spans": [ + { + 
"bbox": [ + 108, + 205, + 218, + 320 + ], + "type": "image", + "image_path": "ffdc7b83ae60bf79a28deb1a926f46e94e6cc41907599645a0ec1bc82fd49706.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 328, + 506, + 361 + ], + "lines": [ + { + "bbox": [ + 104, + 328, + 506, + 361 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 506, + 361 + ], + "type": "text", + "content": "Figure 4: The relationship between reasoning safety and answer safety, where the horizontal and vertical axes represent HR scores. The numbers in the color blocks represent the normalized probabilities, with deeper colors indicating higher probabilities. Best viewed in color." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 234, + 205, + 343, + 320 + ], + "blocks": [ + { + "bbox": [ + 234, + 205, + 343, + 320 + ], + "lines": [ + { + "bbox": [ + 234, + 205, + 343, + 320 + ], + "spans": [ + { + "bbox": [ + 234, + 205, + 343, + 320 + ], + "type": "image", + "image_path": "feec6998f962b31d78f94fa1bbbcb11311f15d2b33513d67b19b41c78845006e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 360, + 205, + 503, + 319 + ], + "blocks": [ + { + "bbox": [ + 360, + 205, + 503, + 319 + ], + "lines": [ + { + "bbox": [ + 360, + 205, + 503, + 319 + ], + "spans": [ + { + "bbox": [ + 360, + 205, + 503, + 319 + ], + "type": "image", + "image_path": "4f65d845f53f43f7c10558a1e1c58dbf484417bc971c452cf7673bb398f6dcc5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 370, + 506, + 480 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 105, + 370, + 504, + 426 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 370, + 504, + 426 + ], + "spans": [ + { + "bbox": [ + 105, + 370, + 
504, + 426 + ], + "type": "text", + "content": "- Reasoning-Answer Coupling: When reasoning steps are compromised (Think-HR > 3), answer jailbreaking success rate reaches " + }, + { + "bbox": [ + 105, + 370, + 504, + 426 + ], + "type": "inline_equation", + "content": "90.9\\%" + }, + { + "bbox": [ + 105, + 370, + 504, + 426 + ], + "type": "text", + "content": ", indicating that unsafe cognitive frameworks propagate to outputs. Conversely, " + }, + { + "bbox": [ + 105, + 370, + 504, + 426 + ], + "type": "inline_equation", + "content": "93.7\\%" + }, + { + "bbox": [ + 105, + 370, + 504, + 426 + ], + "type": "text", + "content": " of compromised answers (Answer-HR > 3) originate from unsafe reasoning steps, suggesting output vulnerabilities necessitate but are not fully determined by reasoning flaws." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "spans": [ + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "text", + "content": "- Emergent Self-Correction: A subset of MLRMs (e.g., Mulberry-Qwen) exhibit right-upper quadrant clustering in heatmaps, where " + }, + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "inline_equation", + "content": "12.4\\%" + }, + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "text", + "content": " of unsafe reasoning steps (Think-HR " + }, + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "inline_equation", + "content": ">3" + }, + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "text", + "content": ") yield safe answers (Answer-HR " + }, + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "inline_equation", + "content": "\\leq 3" + }, + { + "bbox": [ + 105, + 425, + 506, + 480 + ], + "type": "text", + "content": "), exhibiting emergent self-correction capabilities in MLRMs. 
This nascent capability provides a foothold for layered defenses: hardening reasoning steps while amplifying innate safeguards." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 488, + 286, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 488, + 286, + 500 + ], + "spans": [ + { + "bbox": [ + 121, + 488, + 286, + 500 + ], + "type": "text", + "content": "Takeaway 3.3: Emergent Self-Correction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 120, + 510, + 489, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 120, + 510, + 489, + 533 + ], + "spans": [ + { + "bbox": [ + 120, + 510, + 489, + 533 + ], + "type": "text", + "content": "MLRMs exhibit intrinsic self-correction: " + }, + { + "bbox": [ + 120, + 510, + 489, + 533 + ], + "type": "inline_equation", + "content": "12.4\\%" + }, + { + "bbox": [ + 120, + 510, + 489, + 533 + ], + "type": "text", + "content": " of unsafe reasoning chains are overridden by safe answers, revealing preserved safeguards that reject harmful cognition." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 555, + 197, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 555, + 197, + 567 + ], + "spans": [ + { + "bbox": [ + 105, + 555, + 197, + 567 + ], + "type": "text", + "content": "4 Related Work" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 579, + 506, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 579, + 506, + 721 + ], + "spans": [ + { + "bbox": [ + 104, + 579, + 506, + 721 + ], + "type": "text", + "content": "Multi-modal Reasoning in Language Models. CoT reasoning process has proven effective for enhancing multi-modal capabilities in language models [25]. Initial approaches focused on prompt engineering such as constructing zero-shot prompts like \"think step-by-step to understand the given text and image inputs\" directly [17]. 
In contrast, the cascade paradigm achieved multi-modal reasoning asynchronously (e.g., using one model to describe an image for an LRM to reason) [24]. Reinforcement learning-based models such as LLaVA-Reasoner [9] and Insight-V [12] leveraged CoT datasets and direct preference optimization (DPO) with human feedback to acquire intrinsic reasoning skills. The success of DeepSeek-R1 further catalyzed this trend, inspiring derivative architectures such as Easy-R1 [49], R1-OneVision [50], Multimodal-Open-R1 [13], R1-V [51], VLM-R1 [10] and LMM-R1 [52]. Notably, process reward models (PRMs) like MSTaR [241] and VisualPRM [242] represent a paradigm shift from outcome-focused reward models (ORMs). By providing stepwise feedback during reasoning, PRMs enhance self-consistency and enable iterative self-improvement in MLLMs [25]." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 204 + ], + "type": "text", + "content": "Safety Exploration of LRMs. The rapid advancement of LRMs has prompted growing attention to their safety risks. Recent work systematically evaluates mainstream LRMs across multiple dimensions [28, 35, 32, 33]. For example, [53] reveals significant gaps between open-source R1 models and commercial counterparts like o3-mini, while [54] verifies that DeepSeek-R1 failed to successfully block most harmful prompts, exposing critical vulnerabilities under adversarial conditions. 
Works by [34, 27, 55, 26, 30] further probe the safety boundaries of the LRM and identify their failure modes. Concurrently, researchers have begun investigating attack and defense paradigms specific to LRMs — [29] demonstrates that LRMs are uniquely vulnerable to fine-tuning attacks and [31] critiques the limitations of RL-based safety alignment (e.g., reward hacking, generalization failures) in mitigating harmful outputs. However, these efforts focus narrowly on unimodal LRMs, leaving the safety implications of MLRMs largely unexplored — a critical gap given the distinct risks introduced by cross-modal interactions inherent to MLRM architectures." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 219, + 185, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 219, + 185, + 232 + ], + "spans": [ + { + "bbox": [ + 105, + 219, + 185, + 232 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "spans": [ + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "text", + "content": "The rapid integration of reasoning capabilities into MLLMs has birthed powerful MLRMs with transformative potential. However, our systematic evaluation reveals that this advancement comes at a profound cost to safety. 
Through large-scale empirical studies comparing MLRMs with their base MLLMs, we uncover three critical insights: (1) A reasoning tax: MLRMs suffer " + }, + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "inline_equation", + "content": "37.44\\%" + }, + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "text", + "content": " higher jailbreaking rates than base MLLMs due to eroded safety alignment; (2) Scenario-specific blind spots: risks spike " + }, + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "inline_equation", + "content": "25\\times" + }, + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "text", + "content": " in contexts like Illegal Activity, far exceeding average degradation; (3) Emergent self-correction: Despite tight reasoning- answer safety coupling, MLRMs demonstrate nascent self-correction. That is, " + }, + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "inline_equation", + "content": "16.23\\%" + }, + { + "bbox": [ + 104, + 243, + 506, + 376 + ], + "type": "text", + "content": " of jailbroken reasoning steps are overridden by safe answers, hinting at intrinsic safeguards. To catalyze mitigation, we release OpenSafeMLRM, an open-source toolkit with unified interface for evaluating mainstream MLRMs, datasets, and attacks. These findings mandate urgent efforts to govern how multi-modal AI reasons ensuring capability advances align with ethical imperatives." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 390, + 164, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 390, + 164, + 403 + ], + "spans": [ + { + "bbox": [ + 105, + 390, + 164, + 403 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 110, + 408, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 110, + 408, + 506, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 408, + 506, + 584 + ], + "spans": [ + { + "bbox": [ + 110, + 408, + 506, + 584 + ], + "type": "text", + "content": "[1] DeepSeek-AI, Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, Xiaokang Zhang, Xingkai Yu, Yu Wu, Z. F. Wu, Zhibin Gou, Zhihong Shao, Zhuoshu Li, Ziyi Gao, Aixin Liu, Bing Xue, Bingxuan Wang, Bochao Wu, Bei Feng, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Qu, Hui Li, Jianzhong Guo, Jiashi Li, Jiawei Wang, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, J. L. Cai, Jiaqi Ni, Jian Liang, Jin Chen, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Liang Zhao, Litong Wang, Liyue Zhang, Lei Xu, Leyi Xia, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Meng Li, Miaojun Wang, Mingming Li, Ning Tian, Panpan Huang, Peng Zhang, Qiancheng Wang, Qinyu Chen, Qiushi Du, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, R. J. Chen, R. L. Jin, Ruyi Chen, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shengfeng Ye, Shiyu Wang, Shuiping Yu, Shunfeng Zhou, Shuting Pan, and S. S. Li. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. CoRR, abs/2501.12948, 2025." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 590, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 590, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 110, + 590, + 506, + 723 + ], + "type": "text", + "content": "[2] DeepSeek-AI, Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, Damai Dai, Daya Guo, Dejian Yang, Deli Chen, Dongjie Ji, Erhang Li, Fangyun Lin, Fucong Dai, Fuli Luo, Guangbo Hao, Guanting Chen, Guowei Li, H. Zhang, Han Bao, Hanwei Xu, Haocheng Wang, Haowei Zhang, Honghui Ding, Huajian Xin, Huazuo Gao, Hui Li, Hui Qu, J. L. Cai, Jian Liang, Jianzhong Guo, Jiaqi Ni, Jiashi Li, Jiawei Wang, Jin Chen, Jingchang Chen, Jingyang Yuan, Junjie Qiu, Junlong Li, Junxiao Song, Kai Dong, Kai Hu, Kaige Gao, Kang Guan, Kexin Huang, Kuai Yu, Lean Wang, Lecong Zhang, Lei Xu, Leyi Xia, Liang Zhao, Litong Wang, Liyue Zhang, Meng Li, Miaojun Wang, Mingchuan Zhang, Minghua Zhang, Minghui Tang, Mingming Li, Ning Tian, Panpan Huang, Peiyi Wang, Peng Zhang, Qiancheng Wang, Qihao Zhu, Qinyu Chen, Qiushi Du, R. J. Chen, R. L. Jin, Ruiqi Ge, Ruisong Zhang, Ruizhe Pan, Runji Wang, Runxin Xu, Ruoyu Zhang, Ruyi Chen, S. S. 
Li, Shanghao Lu, Shangyan Zhou, Shanhuang Chen, Shaoqing Wu" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 127, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 127, + 72, + 505, + 106 + ], + "type": "text", + "content": "Shengfeng Ye, Shengfeng Ye, Shirong Ma, Shiyu Wang, Shuang Zhou, Shuiping Yu, Shunfeng Zhou, Shuting Pan, T. Wang, Tao Yun, Tian Pei, Tianyu Sun, W. L. Xiao, and Wangding Zeng. Deepseek-v3 technical report. CoRR, abs/2412.19437, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 114, + 318, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 114, + 318, + 126 + ], + "spans": [ + { + "bbox": [ + 111, + 114, + 318, + 126 + ], + "type": "text", + "content": "[3] OpenAI. Learning to reason with LLMs. 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 134, + 504, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 134, + 504, + 158 + ], + "spans": [ + { + "bbox": [ + 111, + 134, + 504, + 158 + ], + "type": "text", + "content": "[4] Edward Y. Chang, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. Demystifying long chain-of-thought reasoning in llms. CoRR, abs/2502.03373, 2025." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 165, + 505, + 200 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 165, + 505, + 200 + ], + "spans": [ + { + "bbox": [ + 111, + 165, + 505, + 200 + ], + "type": "text", + "content": "[5] Daya Guo, Qihao Zhu, Dejian Yang, Zhenda Xie, Kai Dong, Wentao Zhang, Guanting Chen, Xiao Bi, Yu Wu, YK Li, et al. Deepseek-coder: When the large language model meets programming-the rise of code intelligence. arXiv preprint arXiv:2401.14196, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 207, + 505, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 207, + 505, + 242 + ], + "spans": [ + { + "bbox": [ + 111, + 207, + 505, + 242 + ], + "type": "text", + "content": "[6] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 249, + 505, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 249, + 505, + 283 + ], + "spans": [ + { + "bbox": [ + 111, + 249, + 505, + 283 + ], + "type": "text", + "content": "[7] Jingyi Zhang, Jiaxing Huang, Huanjin Yao, Shunyu Liu, Xikun Zhang, Shijian Lu, and Dacheng Tao. R1-vl: Learning to reason with multimodal large language models via stepwise group relative policy optimization. 2025. arXiv preprint arXiv:2503.12397." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 291, + 505, + 314 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 291, + 505, + 314 + ], + "spans": [ + { + "bbox": [ + 111, + 291, + 505, + 314 + ], + "type": "text", + "content": "[8] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. 
R1-zero's \"aha moment\" in visual reasoning on a 2b non-sft model. 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 322, + 505, + 357 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 322, + 505, + 357 + ], + "spans": [ + { + "bbox": [ + 111, + 322, + 505, + 357 + ], + "type": "text", + "content": "[9] Ruohong Zhang, Bowen Zhang, Yanghao Li, Haotian Zhang, Zhiqing Sun, Zhe Gan, Yinfei Yang, Ruoming Pang, and Yiming Yang. Improve vision language model chain-of-thought reasoning. arXiv preprint arXiv:2410.16198, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 364, + 504, + 388 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 364, + 504, + 388 + ], + "spans": [ + { + "bbox": [ + 105, + 364, + 504, + 388 + ], + "type": "text", + "content": "[10] Haozhan Shen, Zilun Zhang, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. 2025. Accessed: 2025-02-15." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 396, + 504, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 396, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 105, + 396, + 504, + 419 + ], + "type": "text", + "content": "[11] Liu Yuqi, Peng Bohao, Zhong Zhisheng, Yue Zihao, Lu Fanbin, Yu Bei, and Jia Jiaya. Seg-zero: Reasoning-chain guided segmentation via cognitive reinforcement. 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 426, + 504, + 460 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 426, + 504, + 460 + ], + "spans": [ + { + "bbox": [ + 105, + 426, + 504, + 460 + ], + "type": "text", + "content": "[12] Yuhao Dong, Zuyan Liu, Hai-Long Sun, Jingkang Yang, Winston Hu, Yongming Rao, and Ziwei Liu. Insight-v: Exploring long-chain visual reasoning with multimodal large language models. arXiv preprint arXiv:2411.14432, 2024." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 468, + 498, + 482 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 498, + 482 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 498, + 482 + ], + "type": "text", + "content": "[13] EvolvingLMMs Lab. Multimodal open r1. GitHub repository, 2025. Accessed: 2025-02-28." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 489, + 504, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 489, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 105, + 489, + 504, + 533 + ], + "type": "text", + "content": "[14] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfen Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, Ping Luo, Yu Qiao, Qiaosheng Zhang, and Wenqi Shao. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 542, + 504, + 565 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 542, + 504, + 565 + ], + "spans": [ + { + "bbox": [ + 105, + 542, + 504, + 565 + ], + "type": "text", + "content": "[15] Zhangquan Chen, Xufang Luo, and Dongsheng Li. Visrl: Intention-driven visual perception via reinforced reasoning. 2025. arXiv preprint arXiv:2503.07523." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 573, + 504, + 596 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 573, + 504, + 596 + ], + "spans": [ + { + "bbox": [ + 105, + 573, + 504, + 596 + ], + "type": "text", + "content": "[16] Jiaxing Zhao, Xihan Wei, and Liefeng Bo. R1-omni: Explainable omni-multimodal emotion recognition with reinforcement learning. arXiv e-prints, pages arXiv-2503, 2025." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 604, + 504, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 604, + 504, + 649 + ], + "spans": [ + { + "bbox": [ + 105, + 604, + 504, + 649 + ], + "type": "text", + "content": "[17] Vaishnavi Himakunthala, Andy Ouyang, Daniel Rose, Ryan He, Alex Mei, Yujie Lu, Chinmay Sonar, Michael Saxon, and William Yang Wang. Let's think frame by frame with VIP: A video infilling and prediction dataset for evaluating video chain-of-thought. arXiv preprint arXiv:2305.13903, 2023." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 657, + 504, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 657, + 504, + 680 + ], + "spans": [ + { + "bbox": [ + 105, + 657, + 504, + 680 + ], + "type": "text", + "content": "[18] Fanxu Meng, Haotong Yang, Yiding Wang, and Muhan Zhang. Chain of images for intuitively reasoning. arXiv preprint arXiv:2311.09241, 2023." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 688, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 688, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 688, + 504, + 723 + ], + "type": "text", + "content": "[19] Jingran Xie, Shun Lei, Yue Yu, Yang Xiang, Hui Wang, Xixin Wu, and Zhiyong Wu. Leveraging chain of thought towards empathetic spoken dialogue without corresponding question answering data. arXiv preprint arXiv:2501.10937, 2025." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[20] Xuewen Luo, Fan Ding, Yinsheng Song, Xiaofeng Zhang, and Junnyong Loo. Pkrd-cot: A unified chain-of-thought prompting for multi-modal large language models in autonomous driving. arXiv preprint arXiv:2412.02025, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 111, + 504, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 111, + 504, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 111, + 504, + 146 + ], + "type": "text", + "content": "[21] Haojie Zheng, Tianyang Xu, Hanchi Sun, Shu Pu, Ruoxi Chen, and Lichao Sun. Thinking before looking: Improving multimodal llm reasoning via mitigating visual hallucination. arXiv preprint arXiv:2411.12591, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 152, + 505, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 152, + 505, + 197 + ], + "spans": [ + { + "bbox": [ + 106, + 152, + 505, + 197 + ], + "type": "text", + "content": "[22] Timin Gao, Peixian Chen, Mengdan Zhang, Chaoyou Fu, Yunhang Shen, Yan Zhang, Shengchuan Zhang, Xiawu Zheng, Xing Sun, Liujuan Cao, et al. Cantor: Inspiring multimodal chain-of-thought of mllm. 
In Proceedings of the 32nd ACM International Conference on Multimedia, pages 9096-9105, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 202, + 505, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 202, + 505, + 238 + ], + "spans": [ + { + "bbox": [ + 106, + 202, + 505, + 238 + ], + "type": "text", + "content": "[23] Wenshan Wu, Shaoguang Mao, Yadong Zhang, Yan Xia, Li Dong, Lei Cui, and Furu Wei. Mind's eye of llms: Visualization-of-thought elicits spatial reasoning in large language models. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 242, + 505, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 242, + 505, + 277 + ], + "spans": [ + { + "bbox": [ + 106, + 242, + 505, + 277 + ], + "type": "text", + "content": "[24] Bozhi Luan, Hao Feng, Hong Chen, Yonghui Wang, Wengang Zhou, and Houqiang Li. Textcot: Zoom in for enhanced multimodal text-rich image understanding. arXiv preprint arXiv:2404.09797, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 282, + 505, + 317 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 282, + 505, + 317 + ], + "spans": [ + { + "bbox": [ + 106, + 282, + 505, + 317 + ], + "type": "text", + "content": "[25] Yaoting Wang, Shengqiong Wu, Yuecheng Zhang, William Wang, Ziwei Liu, Jiebo Luo, and Hao Fei. Multimodal chain-of-thought reasoning: A comprehensive survey. arXiv preprint arXiv:2503.12605, 2025." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 322, + 423, + 335 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 322, + 423, + 335 + ], + "spans": [ + { + "bbox": [ + 106, + 322, + 423, + 335 + ], + "type": "text", + "content": "[26] Chatterbox. Gen ai safety model comparison and reasoning results, 2025." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 340, + 443, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 340, + 443, + 354 + ], + "spans": [ + { + "bbox": [ + 106, + 340, + 443, + 354 + ], + "type": "text", + "content": "[27] Adversa.ai. Introducing safety aligned deepseek r1 model by enkrypt ai, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 358, + 505, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 358, + 505, + 392 + ], + "spans": [ + { + "bbox": [ + 106, + 358, + 505, + 392 + ], + "type": "text", + "content": "[28] Wenjing Zhang, Xuejiao Lei, Zhaoxiang Liu, Ning Wang, Zhenhong Long, Peijun Yang, Jiaojiao Zhao, Minjie Hua, Chaoyang Ma, Kai Wang, et al. Safety evaluation of deepseek models in chinese contexts. arXiv preprint arXiv:2502.11137, 2025." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 398, + 505, + 432 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 398, + 505, + 432 + ], + "spans": [ + { + "bbox": [ + 106, + 398, + 505, + 432 + ], + "type": "text", + "content": "[29] Zhiyuan Xu, Joseph Gardiner, and Sana Belguith. The dark deep side of deepseek: Fine-tuning attacks against the safety alignment of cot-enabled models. arXiv preprint arXiv:2502.01225, 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 437, + 505, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 437, + 505, + 472 + ], + "spans": [ + { + "bbox": [ + 106, + 437, + 505, + 472 + ], + "type": "text", + "content": "[30] Haoyu Wang, Zeyu Qin, Li Shen, Xueqian Wang, Minhao Cheng, and Dacheng Tao. Leveraging reasoning with guidelines to elicit and utilize knowledge for enhancing safety alignment. arXiv preprint arXiv:2502.04040, 2025." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 478, + 505, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 478, + 505, + 512 + ], + "spans": [ + { + "bbox": [ + 106, + 478, + 505, + 512 + ], + "type": "text", + "content": "[31] Manojkumar Parmar and Yuvaraj Govindarajulu. Challenges in ensuring ai safety in deepseek-r1 models: The shortcomings of reinforcement learning strategies. arXiv preprint arXiv:2501.17030, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 517, + 505, + 552 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 517, + 505, + 552 + ], + "spans": [ + { + "bbox": [ + 106, + 517, + 505, + 552 + ], + "type": "text", + "content": "[32] Abhinav Kumar, Jaechul Roh, Ali Naseh, Marzena Karpinska, Mohit Iyyer, Amir Houmansadr, and Eugene Bagdasarian. Overthink: Slowdown attacks on reasoning llms. arXiv e-prints, page arXiv-2502, February 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 557, + 505, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 557, + 505, + 603 + ], + "spans": [ + { + "bbox": [ + 106, + 557, + 505, + 603 + ], + "type": "text", + "content": "[33] Qiguang Chen, Libo Qin, Jiaqi Wang, Jingxuan Zhou, and Wanxiang Che. Unlocking the capabilities of thought: A reasoning boundary framework to quantify and optimize chain-of-thought. In The Thirty-eighth Annual Conference on Neural Information Processing Systems, September 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 609, + 505, + 643 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 609, + 505, + 643 + ], + "spans": [ + { + "bbox": [ + 106, + 609, + 505, + 643 + ], + "type": "text", + "content": "[34] Zonghao Ying, Guangyi Zheng, Yongxin Huang, Deyue Zhang, Wenxin Zhang, Quanchen Zou, Aishan Liu, Xianglong Liu, and Dacheng Tao. 
Towards understanding the safety boundaries of deepseek models: Evaluation and findings. arXiv preprint arXiv:2503.15092, 2025." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 648, + 505, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 648, + 505, + 682 + ], + "spans": [ + { + "bbox": [ + 106, + 648, + 505, + 682 + ], + "type": "text", + "content": "[35] Tiansheng Huang, Sihao Hu, Fatih Ilhan, Selim Furkan Tekin, Zachary Yahn, Yichang Xu, and Ling Liu. Safety tax: Safety alignment makes your large reasoning models less reasonable. arXiv preprint arXiv:2503.00555, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 505, + 723 + ], + "type": "text", + "content": "[36] Mang Ye, Xuankun Rong, Wenke Huang, Bo Du, Nenghai Yu, and Dacheng Tao. A survey of safety on large vision-language models: Attacks, defenses and evaluations. CoRR, abs/2502.14881, 2025." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[37] Xin Liu, Yichen Zhu, Jindong Gu, Yunshi Lan, Chao Yang, and Yu Qiao. 
Mm-safetybench: A benchmark for safety evaluation of multimodal large language models. In ECCV (56), volume 15114 of Lecture Notes in Computer Science, pages 386-403. Springer, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 114, + 506, + 158 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 114, + 506, + 158 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 506, + 158 + ], + "type": "text", + "content": "[38] Yi Yang, Xiaoxuan He, Hongkun Pan, Xiyan Jiang, Yan Deng, Xingtao Yang, Haoyu Lu, Dacheng Yin, Fengyun Rao, Minfeng Zhu, Bo Zhang, and Wei Chen. R1-onevision: Advancing generalized multimodal reasoning through cross-modal formalization. arXiv preprint arXiv:2503.10615, 2025." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 166, + 506, + 211 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 166, + 506, + 211 + ], + "spans": [ + { + "bbox": [ + 106, + 166, + 506, + 211 + ], + "type": "text", + "content": "[39] Huanjin Yao, Jiaxing Huang, Wenhao Wu, Jingyi Zhang, Yibo Wang, Shunyu Liu, Yingjie Wang, Yuxin Song, Haocheng Feng, Li Shen, et al. Mulberry: Empowering mllm with o1-like reasoning and reflection via collective monte carlo tree search. arXiv preprint arXiv:2412.18319, 2024." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 219, + 506, + 254 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 219, + 506, + 254 + ], + "spans": [ + { + "bbox": [ + 106, + 219, + 506, + 254 + ], + "type": "text", + "content": "[40] Yichen Gong, Delong Ran, Jinyuan Liu, Conglei Wang, Tianshuo Cong, Anyu Wang, Sisi Duan, and Xiaoyun Wang. Figstep: Jailbreaking large vision-language models via typographic visual prompts. arXiv preprint arXiv:2311.05608, 2023." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 261, + 504, + 295 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 261, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 106, + 261, + 504, + 295 + ], + "type": "text", + "content": "[41] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. Gpt-4 technical report. arXiv preprint arXiv:2303.08774, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 303, + 506, + 337 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 303, + 506, + 337 + ], + "spans": [ + { + "bbox": [ + 106, + 303, + 506, + 337 + ], + "type": "text", + "content": "[42] Robin Rombach, Andreas Blattmann, Dominik Lorenz, Patrick Esser, and Björn Ommer. High-resolution image synthesis with latent diffusion models. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10684-10695, 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 345, + 506, + 379 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 345, + 506, + 379 + ], + "spans": [ + { + "bbox": [ + 106, + 345, + 506, + 379 + ], + "type": "text", + "content": "[43] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Jasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 386, + 504, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 386, + 504, + 410 + ], + "spans": [ + { + "bbox": [ + 106, + 386, + 504, + 410 + ], + "type": "text", + "content": "[44] Yu Wang, Xiaofei Zhou, Yichen Wang, Geyuan Zhang, and Tianxing He. Jailbreak large visual language models through multi-modal linkage. 
arXiv preprint arXiv:2412.00473, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 417, + 506, + 452 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 417, + 506, + 452 + ], + "spans": [ + { + "bbox": [ + 106, + 417, + 506, + 452 + ], + "type": "text", + "content": "[45] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 459, + 292, + 472 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 459, + 292, + 472 + ], + "spans": [ + { + "bbox": [ + 106, + 459, + 292, + 472 + ], + "type": "text", + "content": "[46] Qwen Team. Qwen2.5-vl, January 2025." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 479, + 506, + 534 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 479, + 506, + 534 + ], + "spans": [ + { + "bbox": [ + 106, + 479, + 506, + 534 + ], + "type": "text", + "content": "[47] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, Yang Fan, Kai Dang, Mengfei Du, Xuancheng Ren, Rui Men, Dayiheng Liu, Chang Zhou, Jingren Zhou, and Junyang Lin. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 542, + 504, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 542, + 504, + 577 + ], + "spans": [ + { + "bbox": [ + 106, + 542, + 504, + 577 + ], + "type": "text", + "content": "[48] Bo Li, Kaichen Zhang, Hao Zhang, Dong Guo, Renrui Zhang, Feng Li, Yuanhan Zhang, Ziwei Liu, and Chunyuan Li. Llava-last: Stronger llms supercharge multimodal capabilities in the wild, May 2024." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 585, + 506, + 608 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 585, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 106, + 585, + 506, + 608 + ], + "type": "text", + "content": "[49] Zheng Yaowei, Lu Junting, Wang Shenzhi, Feng Zhangchi, Kuang Dongdong, and Xiong Yuwen. Easyr1: An efficient, scalable, multi-modality rl training framework. 2025." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 615, + 506, + 649 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 615, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 106, + 615, + 506, + 649 + ], + "type": "text", + "content": "[50] Yangyi VAI. R1-onevision: Open-source multimodal large language model with reasoning. Online, 2025. Available at: https://yangyi-vali.notion.site/r1-onevision#198b1e4047f780c78306fb451be7160d." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 657, + 504, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 657, + 504, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 657, + 504, + 680 + ], + "type": "text", + "content": "[51] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. 2025. Accessed: 2025-02-02." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 688, + 506, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 506, + 722 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 506, + 722 + ], + "type": "text", + "content": "[52] Peng Yingzhe, Zhang Gongrui, Zhang Miaosen, You Zhiyuan, Liu Jie, Zhu Qipeng, Yang Kai, Xu Xingzhong, Geng Xin, and Yang Xu. Lmm-r1: Empowering 3b lmm's with strong reasoning abilities through two-stage rule-based rl. 2025." 
+ } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 186 + ], + "type": "list", + "angle": 0, + "index": 3, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[53] Aitor Arrieta, Miriam Ugarte, Pablo Valle, José Antonio Parejo, and Sergio Segura. O3-mini vs deepseek-r1: Which one is safer? arXiv preprint arXiv:2501.18438, 2025." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 102, + 505, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 102, + 505, + 145 + ], + "spans": [ + { + "bbox": [ + 106, + 102, + 505, + 145 + ], + "type": "text", + "content": "[54] Mantas Mazeika, Long Phan, Xuwang Yin, Andy Zou, Zifan Wang, Norman Mu, Elham Sakhaaee, Nathaniel Li, Steven Basart, Bo Li, et al. Harmbench: A standardized evaluation framework for automated red teaming and robust refusal. arXiv preprint arXiv:2402.04249, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 153, + 505, + 186 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 153, + 505, + 186 + ], + "spans": [ + { + "bbox": [ + 106, + 153, + 505, + 186 + ], + "type": "text", + "content": "[55] Kaiwen Zhou, Chengzhi Liu, Xuandong Zhao, Shreedhar Jangam, Jayanth Srinivasa, Gaowen Liu, Dawn Song, and Xin Eric Wang. The hidden risks of large reasoning models: A safety assessment of r1. 
arXiv preprint arXiv:2502.12659, 2025." + } + ] + } + ], + "index": 2 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file